run hack/update-netparse-cve.sh

This commit is contained in:
Antonio Ojea 2021-08-20 01:16:14 +02:00
parent e9ddac5d85
commit 0cd75e8fec
159 changed files with 1071 additions and 988 deletions

View File

@ -39,6 +39,7 @@ import (
"github.com/blang/semver"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
var (
@ -307,7 +308,7 @@ func getOrCreateTestCertFiles(certFileName, keyFileName string, spec TestCertSpe
func parseIPList(ips []string) []net.IP {
var netIPs []net.IP
for _, ip := range ips {
netIPs = append(netIPs, net.ParseIP(ip))
netIPs = append(netIPs, netutils.ParseIPSloppy(ip))
}
return netIPs
}
@ -335,7 +336,7 @@ func generateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
IsCA: true,
}
if ip := net.ParseIP(host); ip != nil {
if ip := netutils.ParseIPSloppy(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, host)

View File

@ -96,14 +96,14 @@ func startNodeIpamController(ccmConfig *cloudcontrollerconfig.CompletedConfig, n
// service cidr processing
if len(strings.TrimSpace(nodeIPAMConfig.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(nodeIPAMConfig.ServiceCIDR)
_, serviceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.ServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.ServiceCIDR, err)
}
}
if len(strings.TrimSpace(nodeIPAMConfig.SecondaryServiceCIDR)) != 0 {
_, secondaryServiceCIDR, err = net.ParseCIDR(nodeIPAMConfig.SecondaryServiceCIDR)
_, secondaryServiceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.SecondaryServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.SecondaryServiceCIDR, err)
}

View File

@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/controlplane/reconcilers"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
netutils "k8s.io/utils/net"
)
func TestAddFlags(t *testing.T) {
@ -124,12 +125,12 @@ func TestAddFlags(t *testing.T) {
// This is a snapshot of expected options parsed by args.
expected := &ServerRunOptions{
ServiceNodePortRange: kubeoptions.DefaultServiceNodePortRange,
ServiceClusterIPRanges: (&net.IPNet{IP: net.ParseIP("192.168.128.0"), Mask: net.CIDRMask(17, 32)}).String(),
ServiceClusterIPRanges: (&net.IPNet{IP: netutils.ParseIPSloppy("192.168.128.0"), Mask: net.CIDRMask(17, 32)}).String(),
MasterCount: 5,
EndpointReconcilerType: string(reconcilers.LeaseEndpointReconcilerType),
AllowPrivileged: false,
GenericServerRunOptions: &apiserveroptions.ServerRunOptions{
AdvertiseAddress: net.ParseIP("192.168.10.10"),
AdvertiseAddress: netutils.ParseIPSloppy("192.168.10.10"),
CorsAllowedOriginList: []string{"10.10.10.100", "10.10.10.200"},
MaxRequestsInFlight: 400,
MaxMutatingRequestsInFlight: 200,
@ -175,7 +176,7 @@ func TestAddFlags(t *testing.T) {
DefaultWatchCacheSize: 100,
},
SecureServing: (&apiserveroptions.SecureServingOptions{
BindAddress: net.ParseIP("192.168.10.20"),
BindAddress: netutils.ParseIPSloppy("192.168.10.20"),
BindPort: 6443,
ServerCert: apiserveroptions.GeneratableKeyCert{
CertDirectory: "/var/run/kubernetes",

View File

@ -23,6 +23,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
)
func makeOptionsWithCIDRs(serviceCIDR string, secondaryServiceCIDR string) *ServerRunOptions {
@ -33,14 +34,14 @@ func makeOptionsWithCIDRs(serviceCIDR string, secondaryServiceCIDR string) *Serv
var primaryCIDR, secondaryCIDR net.IPNet
if len(serviceCIDR) > 0 {
_, cidr, _ := net.ParseCIDR(serviceCIDR)
_, cidr, _ := netutils.ParseCIDRSloppy(serviceCIDR)
if cidr != nil {
primaryCIDR = *(cidr)
}
}
if len(secondaryServiceCIDR) > 0 {
_, cidr, _ := net.ParseCIDR(secondaryServiceCIDR)
_, cidr, _ := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
if cidr != nil {
secondaryCIDR = *(cidr)
}
@ -151,7 +152,7 @@ func TestClusterServiceIPRange(t *testing.T) {
}
func getIPnetFromCIDR(cidr string) *net.IPNet {
_, ipnet, _ := net.ParseCIDR(cidr)
_, ipnet, _ := netutils.ParseCIDRSloppy(cidr)
return ipnet
}

View File

@ -61,6 +61,7 @@ import (
"k8s.io/klog/v2"
aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver"
aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
netutils "k8s.io/utils/net"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -670,7 +671,7 @@ func getServiceIPAndRanges(serviceClusterIPRanges string) (net.IP, net.IPNet, ne
return apiServerServiceIP, primaryServiceIPRange, net.IPNet{}, nil
}
_, primaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[0])
_, primaryServiceClusterCIDR, err := netutils.ParseCIDRSloppy(serviceClusterIPRangeList[0])
if err != nil {
return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("service-cluster-ip-range[0] is not a valid cidr")
}
@ -683,7 +684,7 @@ func getServiceIPAndRanges(serviceClusterIPRanges string) (net.IP, net.IPNet, ne
// user provided at least two entries
// note: validation asserts that the list is max of two dual stack entries
if len(serviceClusterIPRangeList) > 1 {
_, secondaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[1])
_, secondaryServiceClusterCIDR, err := netutils.ParseCIDRSloppy(serviceClusterIPRangeList[1])
if err != nil {
return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("service-cluster-ip-range[1] is not an ip net")
}

View File

@ -127,14 +127,14 @@ func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error)
// service cidr processing
if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)
_, serviceCIDR, err = netutils.ParseCIDRSloppy(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.ServiceCIDR, err)
}
}
if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)) != 0 {
_, secondaryServiceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)
_, secondaryServiceCIDR, err = netutils.ParseCIDRSloppy(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR, err)
}

View File

@ -45,6 +45,7 @@ import (
kubectrlmgrconfigscheme "k8s.io/kubernetes/pkg/controller/apis/config/scheme"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
netutils "k8s.io/utils/net"
// add the kubernetes feature gates
_ "k8s.io/kubernetes/pkg/features"
@ -427,7 +428,7 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy
return nil, err
}
if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{netutils.ParseIPSloppy("127.0.0.1")}); err != nil {
return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package options
import (
"net"
"reflect"
"sort"
"testing"
@ -61,6 +60,7 @@ import (
attachdetachconfig "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
ephemeralvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config"
persistentvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
netutils "k8s.io/utils/net"
)
var args = []string{
@ -403,7 +403,7 @@ func TestAddFlags(t *testing.T) {
},
SecureServing: (&apiserveroptions.SecureServingOptions{
BindPort: 10001,
BindAddress: net.ParseIP("192.168.4.21"),
BindAddress: netutils.ParseIPSloppy("192.168.4.21"),
ServerCert: apiserveroptions.GeneratableKeyCert{
CertDirectory: "/a/b/c",
PairName: "kube-controller-manager",

View File

@ -86,7 +86,7 @@ import (
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/utils/exec"
utilsnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
@ -836,13 +836,13 @@ func (s *ProxyServer) CleanupAndExit() error {
// 2. the primary IP from the Node object, if set
// 3. if no IP is found it defaults to 127.0.0.1 and IPv4
func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.IP {
nodeIP := net.ParseIP(bindAddress)
nodeIP := netutils.ParseIPSloppy(bindAddress)
if nodeIP.IsUnspecified() {
nodeIP = utilnode.GetNodeIP(client, hostname)
}
if nodeIP == nil {
klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag")
nodeIP = net.ParseIP("127.0.0.1")
nodeIP = netutils.ParseIPSloppy("127.0.0.1")
}
return nodeIP
}
@ -853,8 +853,8 @@ func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.
func nodeIPTuple(bindAddress string) [2]net.IP {
nodes := [2]net.IP{net.IPv4zero, net.IPv6zero}
adr := net.ParseIP(bindAddress)
if utilsnet.IsIPv6(adr) {
adr := netutils.ParseIPSloppy(bindAddress)
if netutils.IsIPv6(adr) {
nodes[1] = adr
} else {
nodes[0] = adr

View File

@ -24,7 +24,6 @@ import (
"context"
"errors"
"fmt"
"net"
goruntime "runtime"
"strings"
"time"
@ -65,7 +64,7 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
"k8s.io/utils/exec"
utilsnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
@ -177,7 +176,7 @@ func newProxyServer(
klog.V(2).InfoS("DetectLocalMode", "LocalMode", string(detectLocalMode))
primaryProtocol := utiliptables.ProtocolIPv4
if utilsnet.IsIPv6(nodeIP) {
if netutils.IsIPv6(nodeIP) {
primaryProtocol = utiliptables.ProtocolIPv6
}
iptInterface = utiliptables.New(execer, primaryProtocol)
@ -350,7 +349,7 @@ func newProxyServer(
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = userspace.NewProxier(
userspace.NewLoadBalancerRR(),
net.ParseIP(config.BindAddress),
netutils.ParseIPSloppy(config.BindAddress),
iptInterface,
execer,
*utilnet.ParsePortRangeOrDie(config.PortRange),
@ -504,7 +503,7 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy
}
// localDetectors, like ipt, need to be of the order [IPv4, IPv6], but PodCIDRs is setup so that PodCIDRs[0] == PodCIDR.
// so have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1]
if utilsnet.IsIPv6CIDRString(nodeInfo.Spec.PodCIDR) {
if netutils.IsIPv6CIDRString(nodeInfo.Spec.PodCIDR) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDR, ipt[1])
if err != nil {
return localDetectors, err
@ -538,7 +537,7 @@ func cidrTuple(cidrList string) [2]string {
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if utilsnet.IsIPv6CIDRString(cidr) && !foundIPv6 {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {

View File

@ -26,6 +26,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
netutils "k8s.io/utils/net"
clientsetfake "k8s.io/client-go/kubernetes/fake"
@ -232,21 +233,21 @@ func Test_detectNodeIP(t *testing.T) {
nodeInfo: makeNodeWithAddresses("", "", ""),
hostname: "fakeHost",
bindAddress: "10.0.0.1",
expectedIP: net.ParseIP("10.0.0.1"),
expectedIP: netutils.ParseIPSloppy("10.0.0.1"),
},
{
name: "Bind address IPv6 unicast address and no Node object",
nodeInfo: makeNodeWithAddresses("", "", ""),
hostname: "fakeHost",
bindAddress: "fd00:4321::2",
expectedIP: net.ParseIP("fd00:4321::2"),
expectedIP: netutils.ParseIPSloppy("fd00:4321::2"),
},
{
name: "No Valid IP found",
nodeInfo: makeNodeWithAddresses("", "", ""),
hostname: "fakeHost",
bindAddress: "",
expectedIP: net.ParseIP("127.0.0.1"),
expectedIP: netutils.ParseIPSloppy("127.0.0.1"),
},
// Disabled because the GetNodeIP method has a backoff retry mechanism
// and the test takes more than 30 seconds
@ -256,63 +257,63 @@ func Test_detectNodeIP(t *testing.T) {
// nodeInfo: makeNodeWithAddresses("", "", ""),
// hostname: "fakeHost",
// bindAddress: "0.0.0.0",
// expectedIP: net.ParseIP("127.0.0.1"),
// expectedIP: net.IP{127, 0, 0, 1},
// },
{
name: "Bind address 0.0.0.0 and node with IPv4 InternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"),
hostname: "fakeHost",
bindAddress: "0.0.0.0",
expectedIP: net.ParseIP("192.168.1.1"),
expectedIP: netutils.ParseIPSloppy("192.168.1.1"),
},
{
name: "Bind address :: and node with IPv4 InternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"),
hostname: "fakeHost",
bindAddress: "::",
expectedIP: net.ParseIP("192.168.1.1"),
expectedIP: netutils.ParseIPSloppy("192.168.1.1"),
},
{
name: "Bind address 0.0.0.0 and node with IPv6 InternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"),
hostname: "fakeHost",
bindAddress: "0.0.0.0",
expectedIP: net.ParseIP("fd00:1234::1"),
expectedIP: netutils.ParseIPSloppy("fd00:1234::1"),
},
{
name: "Bind address :: and node with IPv6 InternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"),
hostname: "fakeHost",
bindAddress: "::",
expectedIP: net.ParseIP("fd00:1234::1"),
expectedIP: netutils.ParseIPSloppy("fd00:1234::1"),
},
{
name: "Bind address 0.0.0.0 and node with only IPv4 ExternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"),
hostname: "fakeHost",
bindAddress: "0.0.0.0",
expectedIP: net.ParseIP("90.90.90.90"),
expectedIP: netutils.ParseIPSloppy("90.90.90.90"),
},
{
name: "Bind address :: and node with only IPv4 ExternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"),
hostname: "fakeHost",
bindAddress: "::",
expectedIP: net.ParseIP("90.90.90.90"),
expectedIP: netutils.ParseIPSloppy("90.90.90.90"),
},
{
name: "Bind address 0.0.0.0 and node with only IPv6 ExternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"),
hostname: "fakeHost",
bindAddress: "0.0.0.0",
expectedIP: net.ParseIP("2001:db8::2"),
expectedIP: netutils.ParseIPSloppy("2001:db8::2"),
},
{
name: "Bind address :: and node with only IPv6 ExternalIP set",
nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"),
hostname: "fakeHost",
bindAddress: "::",
expectedIP: net.ParseIP("2001:db8::2"),
expectedIP: netutils.ParseIPSloppy("2001:db8::2"),
},
}
for _, c := range cases {

View File

@ -23,7 +23,6 @@ package app
import (
"errors"
"fmt"
"net"
goruntime "runtime"
// Enable pprof HTTP handlers.
@ -45,6 +44,7 @@ import (
utilnetsh "k8s.io/kubernetes/pkg/util/netsh"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
)
// NewProxyServer returns a new ProxyServer.
@ -148,7 +148,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
proxier, err = winuserspace.NewProxier(
winuserspace.NewLoadBalancerRR(),
net.ParseIP(config.BindAddress),
netutils.ParseIPSloppy(config.BindAddress),
netshInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
// TODO @pires replace below with default values, if applicable

View File

@ -26,6 +26,7 @@ import (
apiserveroptions "k8s.io/apiserver/pkg/server/options"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
netutils "k8s.io/utils/net"
)
// CombinedInsecureServingOptions sets up to two insecure listeners for healthz and metrics. The flags
@ -78,11 +79,11 @@ func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, c
if o.Healthz != nil {
o.Healthz.BindPort = o.BindPort
o.Healthz.BindAddress = net.ParseIP(o.BindAddress)
o.Healthz.BindAddress = netutils.ParseIPSloppy(o.BindAddress)
}
if o.Metrics != nil {
o.Metrics.BindPort = o.BindPort
o.Metrics.BindAddress = net.ParseIP(o.BindAddress)
o.Metrics.BindAddress = netutils.ParseIPSloppy(o.BindAddress)
}
return o.applyTo(c, componentConfig)
@ -125,7 +126,7 @@ func updateDeprecatedInsecureServingOptionsFromAddress(is *apiserveroptions.Depr
} else {
// In the previous `validate` process, we can ensure that the `addr` is legal, so ignore the error
host, portInt, _ := splitHostIntPort(addr)
is.BindAddress = net.ParseIP(host)
is.BindAddress = netutils.ParseIPSloppy(host)
is.BindPort = portInt
}
}
@ -142,7 +143,7 @@ func (o *CombinedInsecureServingOptions) Validate() []error {
errors = append(errors, fmt.Errorf("--port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", o.BindPort))
}
if len(o.BindAddress) > 0 && net.ParseIP(o.BindAddress) == nil {
if len(o.BindAddress) > 0 && netutils.ParseIPSloppy(o.BindAddress) == nil {
errors = append(errors, fmt.Errorf("--address %v is an invalid IP address", o.BindAddress))
}

View File

@ -45,6 +45,7 @@ import (
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/latest"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
netutils "k8s.io/utils/net"
)
// Options has all the params needed to run a Scheduler
@ -286,7 +287,7 @@ func (o *Options) Validate() []error {
// Config return a scheduler config object
func (o *Options) Config() (*schedulerappconfig.Config, error) {
if o.SecureServing != nil {
if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{netutils.ParseIPSloppy("127.0.0.1")}); err != nil {
return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
}
}

View File

@ -20,6 +20,8 @@ import (
"net"
"strconv"
netutils "k8s.io/utils/net"
"github.com/pkg/errors"
)
@ -29,7 +31,7 @@ func APIEndpointFromString(apiEndpoint string) (APIEndpoint, error) {
if err != nil {
return APIEndpoint{}, errors.Wrapf(err, "invalid advertise address endpoint: %s", apiEndpoint)
}
if net.ParseIP(apiEndpointHost) == nil {
if netutils.ParseIPSloppy(apiEndpointHost) == nil {
return APIEndpoint{}, errors.Errorf("invalid API endpoint IP: %s", apiEndpointHost)
}
apiEndpointPort, err := net.LookupPort("tcp", apiEndpointPortStr)

View File

@ -34,7 +34,7 @@ import (
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
"k8s.io/klog/v2"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@ -319,7 +319,7 @@ func ValidateCertSANs(altnames []string, fldPath *field.Path) field.ErrorList {
for _, altname := range altnames {
if errs := validation.IsDNS1123Subdomain(altname); len(errs) != 0 {
if errs2 := validation.IsWildcardDNS1123Subdomain(altname); len(errs2) != 0 {
if net.ParseIP(altname) == nil {
if netutils.ParseIPSloppy(altname) == nil {
allErrs = append(allErrs, field.Invalid(fldPath, altname, fmt.Sprintf("altname is not a valid IP address, DNS label or a DNS label with subdomain wildcards: %s; %s", strings.Join(errs, "; "), strings.Join(errs2, "; "))))
}
}
@ -350,7 +350,7 @@ func ValidateURLs(urls []string, requireHTTPS bool, fldPath *field.Path) field.E
// ValidateIPFromString validates ip address
func ValidateIPFromString(ipaddr string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if net.ParseIP(ipaddr) == nil {
if netutils.ParseIPSloppy(ipaddr) == nil {
allErrs = append(allErrs, field.Invalid(fldPath, ipaddr, "ip address is not valid"))
}
return allErrs
@ -377,7 +377,7 @@ func ValidateHostPort(endpoint string, fldPath *field.Path) field.ErrorList {
// ValidateIPNetFromString validates network portion of ip address
func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
subnets, err := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
subnets, err := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, "couldn't parse subnet"))
return allErrs
@ -388,7 +388,7 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, "expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking"))
// if DualStack and there are 2 CIDRs validate if there is at least one of each IP family
case isDualStack && len(subnets) == 2:
areDualStackCIDRs, err := utilnet.IsDualStackCIDRs(subnets)
areDualStackCIDRs, err := netutils.IsDualStackCIDRs(subnets)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, err.Error()))
} else if !areDualStackCIDRs {
@ -400,13 +400,13 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
}
// validate the subnet/s
for _, s := range subnets {
numAddresses := utilnet.RangeSize(s)
numAddresses := netutils.RangeSize(s)
if numAddresses < minAddrs {
allErrs = append(allErrs, field.Invalid(fldPath, s.String(), fmt.Sprintf("subnet with %d address(es) is too small, the minimum is %d", numAddresses, minAddrs)))
}
// Warn when the subnet is in site-local range - i.e. contains addresses that belong to fec0::/10
_, siteLocalNet, _ := net.ParseCIDR("fec0::/10")
_, siteLocalNet, _ := netutils.ParseCIDRSloppy("fec0::/10")
if siteLocalNet.Contains(s.IP) || s.Contains(siteLocalNet.IP) {
klog.Warningf("the subnet %v contains IPv6 site-local addresses that belong to fec0::/10 which has been deprecated by rfc3879", s)
}
@ -422,7 +422,7 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
func ValidateServiceSubnetSize(subnetStr string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// subnets were already validated
subnets, _ := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
subnets, _ := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
for _, serviceSubnet := range subnets {
ones, bits := serviceSubnet.Mask.Size()
if bits-ones > constants.MaximumBitsForServiceSubnet {
@ -437,13 +437,13 @@ func ValidateServiceSubnetSize(subnetStr string, fldPath *field.Path) field.Erro
func ValidatePodSubnetNodeMask(subnetStr string, c *kubeadm.ClusterConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// subnets were already validated
subnets, _ := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
subnets, _ := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
for _, podSubnet := range subnets {
// obtain podSubnet mask
mask := podSubnet.Mask
maskSize, _ := mask.Size()
// obtain node-cidr-mask
nodeMask, err := getClusterNodeMask(c, utilnet.IsIPv6(podSubnet.IP))
nodeMask, err := getClusterNodeMask(c, netutils.IsIPv6(podSubnet.IP))
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, podSubnet.String(), err.Error()))
continue

View File

@ -17,10 +17,9 @@ limitations under the License.
package componentconfigs
import (
"net"
clientset "k8s.io/client-go/kubernetes"
kubeproxyconfig "k8s.io/kube-proxy/config/v1alpha1"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@ -76,7 +75,7 @@ func (kp *kubeProxyConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error {
}
func kubeProxyDefaultBindAddress(localAdvertiseAddress string) string {
ip := net.ParseIP(localAdvertiseAddress)
ip := netutils.ParseIPSloppy(localAdvertiseAddress)
if ip.To4() != nil {
return kubeadmapiv1.DefaultProxyBindAddressv4
}

View File

@ -34,7 +34,7 @@ import (
apimachineryversion "k8s.io/apimachinery/pkg/version"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
componentversion "k8s.io/component-base/version"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
const (
@ -635,7 +635,7 @@ func GetDNSIP(svcSubnetList string, isDualStack bool) (net.IP, error) {
}
// Selects the 10th IP in service subnet CIDR range as dnsIP
dnsIP, err := utilnet.GetIndexedIP(svcSubnetCIDR, 10)
dnsIP, err := netutils.GetIndexedIP(svcSubnetCIDR, 10)
if err != nil {
return nil, errors.Wrap(err, "unable to get internal Kubernetes Service IP from the given service CIDR")
}
@ -649,7 +649,7 @@ func GetKubernetesServiceCIDR(svcSubnetList string, isDualStack bool) (*net.IPNe
// The default service address family for the cluster is the address family of the first
// service cluster IP range configured via the `--service-cluster-ip-range` flag
// of the kube-controller-manager and kube-apiserver.
svcSubnets, err := utilnet.ParseCIDRs(strings.Split(svcSubnetList, ","))
svcSubnets, err := netutils.ParseCIDRs(strings.Split(svcSubnetList, ","))
if err != nil {
return nil, errors.Wrapf(err, "unable to parse ServiceSubnet %v", svcSubnetList)
}
@ -659,7 +659,7 @@ func GetKubernetesServiceCIDR(svcSubnetList string, isDualStack bool) (*net.IPNe
return svcSubnets[0], nil
}
// internal IP address for the API server
_, svcSubnet, err := net.ParseCIDR(svcSubnetList)
_, svcSubnet, err := netutils.ParseCIDRSloppy(svcSubnetList)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse ServiceSubnet %v", svcSubnetList)
}
@ -672,7 +672,7 @@ func GetAPIServerVirtualIP(svcSubnetList string, isDualStack bool) (net.IP, erro
if err != nil {
return nil, errors.Wrap(err, "unable to get internal Kubernetes Service IP from the given service CIDR")
}
internalAPIServerVirtualIP, err := utilnet.GetIndexedIP(svcSubnet, 1)
internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
if err != nil {
return nil, errors.Wrapf(err, "unable to get the first IP address from the given CIDR: %s", svcSubnet.String())
}

View File

@ -27,6 +27,7 @@ import (
"time"
certutil "k8s.io/client-go/util/cert"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
@ -46,7 +47,7 @@ var (
CommonName: "test-common-name",
Organization: []string{"sig-cluster-lifecycle"},
AltNames: certutil.AltNames{
IPs: []net.IP{net.ParseIP("10.100.0.1")},
IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
DNSNames: []string{"test-domain.space"},
},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
@ -234,7 +235,7 @@ func TestCertToConfig(t *testing.T) {
CommonName: "test-common-name",
Organization: []string{"sig-cluster-lifecycle"},
AltNames: certutil.AltNames{
IPs: []net.IP{net.ParseIP("10.100.0.1")},
IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
DNSNames: []string{"test-domain.space"},
},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
@ -247,7 +248,7 @@ func TestCertToConfig(t *testing.T) {
},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
DNSNames: []string{"test-domain.space"},
IPAddresses: []net.IP{net.ParseIP("10.100.0.1")},
IPAddresses: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
}
cfg := certToConfig(cert)

View File

@ -27,6 +27,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
netutils "k8s.io/utils/net"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
@ -161,7 +162,7 @@ func writeTestKubeconfig(t *testing.T, dir, name string, caCert *x509.Certificat
Organization: []string{"sig-cluster-lifecycle"},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
AltNames: certutil.AltNames{
IPs: []net.IP{net.ParseIP("10.100.0.1")},
IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
DNSNames: []string{"test-domain.space"},
},
},

View File

@ -46,7 +46,7 @@ import (
"k8s.io/klog/v2"
system "k8s.io/system-validators/validators"
utilsexec "k8s.io/utils/exec"
utilsnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -432,7 +432,7 @@ func (hst HTTPProxyCheck) Name() string {
func (hst HTTPProxyCheck) Check() (warnings, errorList []error) {
klog.V(1).Infoln("validating if the connectivity type is via proxy or direct")
u := &url.URL{Scheme: hst.Proto, Host: hst.Host}
if utilsnet.IsIPv6String(hst.Host) {
if netutils.IsIPv6String(hst.Host) {
u.Host = net.JoinHostPort(hst.Host, "1234")
}
@ -474,12 +474,12 @@ func (subnet HTTPProxyCIDRCheck) Check() (warnings, errorList []error) {
return nil, nil
}
_, cidr, err := net.ParseCIDR(subnet.CIDR)
_, cidr, err := netutils.ParseCIDRSloppy(subnet.CIDR)
if err != nil {
return nil, []error{errors.Wrapf(err, "error parsing CIDR %q", subnet.CIDR)}
}
testIP, err := utilsnet.GetIndexedIP(cidr, 1)
testIP, err := netutils.GetIndexedIP(cidr, 1)
if err != nil {
return nil, []error{errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", cidr.String())}
}
@ -941,8 +941,8 @@ func RunInitNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.InitConfigura
checks = addCommonChecks(execer, cfg.KubernetesVersion, &cfg.NodeRegistration, checks)
// Check if Bridge-netfilter and IPv6 relevant flags are set
if ip := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress); ip != nil {
if utilsnet.IsIPv6(ip) {
if ip := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress); ip != nil {
if netutils.IsIPv6(ip) {
checks = append(checks,
FileContentCheck{Path: bridgenf6, Content: []byte{'1'}},
FileContentCheck{Path: ipv6DefaultForwarding, Content: []byte{'1'}},
@ -1006,8 +1006,8 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfigura
checks = append(checks,
HTTPProxyCheck{Proto: "https", Host: ipstr},
)
if ip := net.ParseIP(ipstr); ip != nil {
if utilsnet.IsIPv6(ip) {
if ip := netutils.ParseIPSloppy(ipstr); ip != nil {
if netutils.IsIPv6(ip) {
addIPv6Checks = true
}
}

View File

@ -17,18 +17,17 @@ limitations under the License.
package apiclient
import (
"net"
"strings"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/client-go/testing"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
@ -88,12 +87,12 @@ func (idr *InitDryRunGetter) handleKubernetesService(action core.GetAction) (boo
return false, nil, nil
}
_, svcSubnet, err := net.ParseCIDR(idr.serviceSubnet)
_, svcSubnet, err := netutils.ParseCIDRSloppy(idr.serviceSubnet)
if err != nil {
return true, nil, errors.Wrapf(err, "error parsing CIDR %q", idr.serviceSubnet)
}
internalAPIServerVirtualIP, err := utilnet.GetIndexedIP(svcSubnet, 1)
internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
if err != nil {
return true, nil, errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", svcSubnet.String())
}

View File

@ -31,6 +31,7 @@ import (
apimachineryversion "k8s.io/apimachinery/pkg/version"
componentversion "k8s.io/component-base/version"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
@ -139,7 +140,7 @@ func LowercaseSANs(sans []string) {
// VerifyAPIServerBindAddress can be used to verify if a bind address for the API Server is 0.0.0.0,
// in which case this address is not valid and should not be used.
func VerifyAPIServerBindAddress(address string) error {
ip := net.ParseIP(address)
ip := netutils.ParseIPSloppy(address)
if ip == nil {
return errors.Errorf("cannot parse IP address: %s", address)
}
@ -164,7 +165,7 @@ func ChooseAPIServerBindAddress(bindAddress net.IP) (net.IP, error) {
if err != nil {
if netutil.IsNoRoutesError(err) {
klog.Warningf("WARNING: could not obtain a bind address for the API Server: %v; using: %s", err, constants.DefaultAPIServerBindAddress)
defaultIP := net.ParseIP(constants.DefaultAPIServerBindAddress)
defaultIP := netutils.ParseIPSloppy(constants.DefaultAPIServerBindAddress)
if defaultIP == nil {
return nil, errors.Errorf("cannot parse default IP address: %s", constants.DefaultAPIServerBindAddress)
}

View File

@ -31,6 +31,7 @@ import (
netutil "k8s.io/apimachinery/pkg/util/net"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@ -122,7 +123,7 @@ func SetNodeRegistrationDynamicDefaults(cfg *kubeadmapi.NodeRegistrationOptions,
// SetAPIEndpointDynamicDefaults checks and sets configuration values for the APIEndpoint object
func SetAPIEndpointDynamicDefaults(cfg *kubeadmapi.APIEndpoint) error {
// validate cfg.API.AdvertiseAddress.
addressIP := net.ParseIP(cfg.AdvertiseAddress)
addressIP := netutils.ParseIPSloppy(cfg.AdvertiseAddress)
if addressIP == nil && cfg.AdvertiseAddress != "" {
return errors.Errorf("couldn't use \"%s\" as \"apiserver-advertise-address\", must be ipv4 or ipv6 address", cfg.AdvertiseAddress)
}

View File

@ -25,7 +25,7 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/validation"
utilsnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
@ -100,7 +100,7 @@ func ParseHostPort(hostport string) (string, string, error) {
}
// if host is a valid IP, returns it
if ip := net.ParseIP(host); ip != nil {
if ip := netutils.ParseIPSloppy(host); ip != nil {
return host, port, nil
}
@ -115,7 +115,7 @@ func ParseHostPort(hostport string) (string, string, error) {
// ParsePort parses a string representing a TCP port.
// If the string is not a valid representation of a TCP port, ParsePort returns an error.
func ParsePort(port string) (int, error) {
portInt, err := utilsnet.ParsePort(port, true)
portInt, err := netutils.ParsePort(port, true)
if err == nil && (1 <= portInt && portInt <= 65535) {
return portInt, nil
}
@ -133,7 +133,7 @@ func parseAPIEndpoint(localEndpoint *kubeadmapi.APIEndpoint) (net.IP, string, er
}
// parse the AdvertiseAddress
var ip = net.ParseIP(localEndpoint.AdvertiseAddress)
var ip = netutils.ParseIPSloppy(localEndpoint.AdvertiseAddress)
if ip == nil {
return nil, "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress)
}

View File

@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -417,7 +418,7 @@ func pathForCSR(pkiPath, name string) string {
// GetAPIServerAltNames builds an AltNames object for to be used when generating apiserver certificate
func GetAPIServerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
advertiseAddress := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress)
if advertiseAddress == nil {
return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
cfg.LocalAPIEndpoint.AdvertiseAddress)
@ -446,7 +447,7 @@ func GetAPIServerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames
// add cluster controlPlaneEndpoint if present (dns or ip)
if len(cfg.ControlPlaneEndpoint) > 0 {
if host, _, err := kubeadmutil.ParseHostPort(cfg.ControlPlaneEndpoint); err == nil {
if ip := net.ParseIP(host); ip != nil {
if ip := netutils.ParseIPSloppy(host); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else {
altNames.DNSNames = append(altNames.DNSNames, host)
@ -478,7 +479,7 @@ func GetEtcdPeerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames,
// getAltNames builds an AltNames object with the cfg and certName.
func getAltNames(cfg *kubeadmapi.InitConfiguration, certName string) (*certutil.AltNames, error) {
// advertise address
advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
advertiseAddress := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress)
if advertiseAddress == nil {
return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
cfg.LocalAPIEndpoint.AdvertiseAddress)
@ -508,7 +509,7 @@ func getAltNames(cfg *kubeadmapi.InitConfiguration, certName string) (*certutil.
// certNames is used to print user facing warnings and should be the name of the cert the altNames will be used for
func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) {
for _, altname := range SANs {
if ip := net.ParseIP(altname); ip != nil {
if ip := netutils.ParseIPSloppy(altname); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
altNames.DNSNames = append(altNames.DNSNames, altname)

View File

@ -30,6 +30,7 @@ import (
"testing"
certutil "k8s.io/client-go/util/cert"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
@ -633,7 +634,7 @@ func TestGetAPIServerAltNames(t *testing.T) {
for _, IPAddress := range rt.expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true
break
}
@ -698,7 +699,7 @@ func TestGetEtcdAltNames(t *testing.T) {
t.Run(IPAddress, func(t *testing.T) {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true
break
}
@ -757,7 +758,7 @@ func TestGetEtcdPeerAltNames(t *testing.T) {
for _, IPAddress := range expectedIPAddresses {
found := false
for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) {
if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true
break
}

View File

@ -103,7 +103,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
const (
@ -1122,7 +1122,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
var nodeIPs []net.IP
if kubeServer.NodeIP != "" {
for _, ip := range strings.Split(kubeServer.NodeIP, ",") {
parsedNodeIP := net.ParseIP(strings.TrimSpace(ip))
parsedNodeIP := netutils.ParseIPSloppy(strings.TrimSpace(ip))
if parsedNodeIP == nil {
klog.InfoS("Could not parse --node-ip ignoring", "IP", ip)
} else {
@ -1132,7 +1132,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
}
if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(nodeIPs) > 1 {
return fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", kubeServer.NodeIP)
} else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && utilnet.IsIPv6(nodeIPs[0]) == utilnet.IsIPv6(nodeIPs[1])) {
} else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && netutils.IsIPv6(nodeIPs[0]) == netutils.IsIPv6(nodeIPs[1])) {
return fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", kubeServer.NodeIP)
} else if len(nodeIPs) == 2 && kubeServer.CloudProvider != "" {
return fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", kubeServer.NodeIP)
@ -1224,7 +1224,7 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele
go k.ListenAndServe(kubeCfg, kubeDeps.TLSOptions, kubeDeps.Auth)
}
if kubeCfg.ReadOnlyPort > 0 {
go k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
go k.ListenAndServeReadOnly(netutils.ParseIPSloppy(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) {
go k.ListenAndServePodResources()

View File

@ -3112,7 +3112,7 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
}
for i, ns := range dnsConfig.Nameservers {
if ip := net.ParseIP(ns); ip == nil {
if ip := netutils.ParseIPSloppy(ns); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address"))
}
}
@ -3246,7 +3246,7 @@ func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldToleratio
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, hostAlias := range hostAliases {
if ip := net.ParseIP(hostAlias.IP); ip == nil {
if ip := netutils.ParseIPSloppy(hostAlias.IP); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address"))
}
for _, hostname := range hostAlias.Hostnames {
@ -5840,7 +5840,7 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path)
// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
ip := net.ParseIP(ipAddress)
ip := netutils.ParseIPSloppy(ipAddress)
if ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
return allErrs
@ -6160,7 +6160,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for i, ingress := range status.Ingress {
idxPath := fldPath.Child("ingress").Index(i)
if len(ingress.IP) > 0 {
if isIP := (net.ParseIP(ingress.IP) != nil); !isIP {
if isIP := (netutils.ParseIPSloppy(ingress.IP) != nil); !isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
}
}
@ -6168,7 +6168,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
}
if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
if isIP := (netutils.ParseIPSloppy(ingress.Hostname) != nil); isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
}
}
@ -6198,7 +6198,7 @@ func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *
// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr)
_, net, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return nil, err
}

View File

@ -18,7 +18,6 @@ package validation
import (
"fmt"
"net"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
@ -33,6 +32,7 @@ import (
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
@ -327,7 +327,7 @@ func validateIngressRules(ingressRules []networking.IngressRule, fldPath *field.
for i, ih := range ingressRules {
wildcardHost := false
if len(ih.Host) > 0 {
if isIP := (net.ParseIP(ih.Host) != nil); isIP {
if isIP := (netutils.ParseIPSloppy(ih.Host) != nil); isIP {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address"))
}
// TODO: Ports and ips are allowed in the host part of a url

View File

@ -18,7 +18,6 @@ package endpointslicemirroring
import (
"fmt"
"net"
"strings"
corev1 "k8s.io/api/core/v1"
@ -30,6 +29,7 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/kubernetes/pkg/apis/discovery/validation"
endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
netutils "k8s.io/utils/net"
)
// addrTypePortMapKey is used to uniquely identify groups of endpoint ports and
@ -50,7 +50,7 @@ func (pk addrTypePortMapKey) addressType() discovery.AddressType {
}
func getAddressType(address string) *discovery.AddressType {
ip := net.ParseIP(address)
ip := netutils.ParseIPSloppy(address)
if ip == nil {
return nil
}

View File

@ -25,6 +25,7 @@ import (
"net"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -80,7 +81,7 @@ func (a *adapter) Alias(ctx context.Context, node *v1.Node) (*net.IPNet, error)
klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", node.Name, cidrs)
}
_, cidrRange, err := net.ParseCIDR(cidrs[0])
_, cidrRange, err := netutils.ParseCIDRSloppy(cidrs[0])
if err != nil {
return nil, err
}

View File

@ -24,6 +24,7 @@ import (
"k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
func TestCIDRSetFullyAllocated(t *testing.T) {
@ -47,7 +48,7 @@ func TestCIDRSetFullyAllocated(t *testing.T) {
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
@ -198,7 +199,7 @@ func TestIndexToCIDRBlock(t *testing.T) {
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subnetMaskSize)
if err != nil {
t.Fatalf("error for %v ", tc.description)
@ -225,7 +226,7 @@ func TestCIDRSet_RandomishAllocation(t *testing.T) {
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
@ -286,7 +287,7 @@ func TestCIDRSet_AllocationOccupied(t *testing.T) {
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
@ -399,7 +400,7 @@ func TestDoubleOccupyRelease(t *testing.T) {
// operations have been executed.
numAllocatable24s := (1 << 8) - 3
_, clusterCIDR, _ := net.ParseCIDR(clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
t.Fatalf("Error allocating CIDRSet")
@ -407,7 +408,7 @@ func TestDoubleOccupyRelease(t *testing.T) {
// Execute the operations
for _, op := range operations {
_, cidr, _ := net.ParseCIDR(op.cidrStr)
_, cidr, _ := netutils.ParseCIDRSloppy(op.cidrStr)
switch op.operation {
case "occupy":
a.Occupy(cidr)
@ -557,7 +558,7 @@ func TestGetBitforCIDR(t *testing.T) {
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
@ -566,7 +567,7 @@ func TestGetBitforCIDR(t *testing.T) {
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
_, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
@ -727,7 +728,7 @@ func TestOccupy(t *testing.T) {
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
@ -737,7 +738,7 @@ func TestOccupy(t *testing.T) {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
_, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
@ -796,7 +797,7 @@ func TestCIDRSetv6(t *testing.T) {
}
for _, tc := range cases {
t.Run(tc.description, func(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if gotErr := err != nil; gotErr != tc.expectErr {
t.Fatalf("NewCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.subNetMaskSize, a, err, gotErr, tc.expectErr)
@ -834,7 +835,7 @@ func TestCIDRSetv6(t *testing.T) {
func TestCidrSetMetrics(t *testing.T) {
cidr := "10.0.0.0/16"
_, clusterCIDR, _ := net.ParseCIDR(cidr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
// We have 256 free cidrs
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
@ -880,7 +881,7 @@ func TestCidrSetMetrics(t *testing.T) {
func TestCidrSetMetricsHistogram(t *testing.T) {
cidr := "10.0.0.0/16"
_, clusterCIDR, _ := net.ParseCIDR(cidr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
// We have 256 free cidrs
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
@ -890,7 +891,7 @@ func TestCidrSetMetricsHistogram(t *testing.T) {
// Allocate half of the range
// Occupy does not update the nextCandidate
_, halfClusterCIDR, _ := net.ParseCIDR("10.0.0.0/17")
_, halfClusterCIDR, _ := netutils.ParseCIDRSloppy("10.0.0.0/17")
a.Occupy(halfClusterCIDR)
em := testMetrics{
usage: 0.5,
@ -917,7 +918,7 @@ func TestCidrSetMetricsHistogram(t *testing.T) {
func TestCidrSetMetricsDual(t *testing.T) {
// create IPv4 cidrSet
cidrIPv4 := "10.0.0.0/16"
_, clusterCIDRv4, _ := net.ParseCIDR(cidrIPv4)
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
a, err := NewCIDRSet(clusterCIDRv4, 24)
if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -925,7 +926,7 @@ func TestCidrSetMetricsDual(t *testing.T) {
clearMetrics(map[string]string{"clusterCIDR": cidrIPv4})
// create IPv6 cidrSet
cidrIPv6 := "2001:db8::/48"
_, clusterCIDRv6, _ := net.ParseCIDR(cidrIPv6)
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
b, err := NewCIDRSet(clusterCIDRv6, 64)
if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -1012,7 +1013,7 @@ func expectMetrics(t *testing.T, label string, em testMetrics) {
// Benchmarks
func benchmarkAllocateAllIPv6(cidr string, subnetMaskSize int, b *testing.B) {
_, clusterCIDR, _ := net.ParseCIDR(cidr)
_, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
a, _ := NewCIDRSet(clusterCIDR, subnetMaskSize)
for n := 0; n < b.N; n++ {
// Allocate the whole range + 1

View File

@ -318,7 +318,7 @@ func needPodCIDRsUpdate(node *v1.Node, podCIDRs []*net.IPNet) (bool, error) {
if node.Spec.PodCIDR == "" {
return true, nil
}
_, nodePodCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
_, nodePodCIDR, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err != nil {
klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR)
// We will try to overwrite with new CIDR(s)

View File

@ -25,8 +25,9 @@ import (
"time"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
@ -119,7 +120,7 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
}
for _, node := range nodes.Items {
if node.Spec.PodCIDR != "" {
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
_, cidrRange, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err == nil {
c.set.Occupy(cidrRange)
klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)

View File

@ -21,8 +21,9 @@ import (
"net"
"sync"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
@ -224,7 +225,7 @@ func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error {
return nil
}
for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr)
_, podCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
@ -286,7 +287,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
}
for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr)
_, podCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
}

View File

@ -30,6 +30,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil"
netutils "k8s.io/utils/net"
)
const testNodePollInterval = 10 * time.Millisecond
@ -86,7 +87,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4}
}(),
ServiceCIDR: nil,
@ -111,8 +112,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: nil,
@ -140,7 +141,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4}
}(),
ServiceCIDR: nil,
@ -168,8 +169,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: nil,
@ -198,7 +199,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4}
}(),
ServiceCIDR: nil,
@ -227,7 +228,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4}
}(),
ServiceCIDR: nil,
@ -256,8 +257,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: nil,
@ -286,8 +287,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: nil,
@ -341,7 +342,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: nil,
@ -366,11 +367,11 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR
}(),
SecondaryServiceCIDR: nil,
@ -395,11 +396,11 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR
}(),
SecondaryServiceCIDR: nil,
@ -426,12 +427,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR
}(),
SecondaryServiceCIDR: nil,
@ -452,12 +453,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
return []*net.IPNet{clusterCIDRv6, clusterCIDRv4}
}(),
ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR
}(),
SecondaryServiceCIDR: nil,
@ -478,13 +479,13 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84")
_, clusterCIDRv4_2, _ := net.ParseCIDR("10.0.0.0/8")
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
_, clusterCIDRv4_2, _ := netutils.ParseCIDRSloppy("10.0.0.0/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2}
}(),
ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
_, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR
}(),
SecondaryServiceCIDR: nil,
@ -521,7 +522,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("10.10.0.0/22")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.10.0.0/22")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: nil,
@ -557,7 +558,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
// pre allocate the cidrs as per the test
for idx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
_, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
@ -623,7 +624,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: nil,
@ -654,7 +655,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
// this is a bit of white box testing
for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
_, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err)
}
@ -727,7 +728,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: nil,
@ -759,7 +760,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
},
allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
_, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR}
}(),
ServiceCIDR: nil,
@ -796,7 +797,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
// this is a bit of white box testing
for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
_, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}

View File

@ -23,8 +23,9 @@ import (
"time"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
)
@ -281,7 +282,7 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod
return fmt.Errorf("cannot sync to cloud in mode %q", sync.mode)
}
_, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
_, aliasRange, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err != nil {
klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
node.Spec.PodCIDR, node.Name, err)
@ -364,7 +365,7 @@ func (op *deleteOp) run(sync *NodeSync) error {
return nil
}
_, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
_, cidrRange, err := netutils.ParseCIDRSloppy(op.node.Spec.PodCIDR)
if err != nil {
klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
op.node.Name, op.node.Spec.PodCIDR, err)

View File

@ -28,12 +28,13 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
var (
_, clusterCIDRRange, _ = net.ParseCIDR("10.1.0.0/16")
_, clusterCIDRRange, _ = netutils.ParseCIDRSloppy("10.1.0.0/16")
)
type fakeEvent struct {

View File

@ -18,12 +18,14 @@ package test
import (
"net"
netutils "k8s.io/utils/net"
)
// MustParseCIDR returns the CIDR range parsed from s or panics if the string
// cannot be parsed.
func MustParseCIDR(s string) *net.IPNet {
_, ret, err := net.ParseCIDR(s)
_, ret, err := netutils.ParseCIDRSloppy(s)
if err != nil {
panic(err)
}

View File

@ -89,8 +89,8 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
} {
t.Run(tc.desc, func(t *testing.T) {
clusterCidrs, _ := netutils.ParseCIDRs(strings.Split(tc.clusterCIDR, ","))
_, serviceCIDRIpNet, _ := net.ParseCIDR(tc.serviceCIDR)
_, secondaryServiceCIDRIpNet, _ := net.ParseCIDR(tc.secondaryServiceCIDR)
_, serviceCIDRIpNet, _ := netutils.ParseCIDRSloppy(tc.serviceCIDR)
_, secondaryServiceCIDRIpNet, _ := netutils.ParseCIDRSloppy(tc.secondaryServiceCIDR)
if os.Getenv("EXIT_ON_FATAL") == "1" {
// This is the subprocess which runs the actual code.

View File

@ -17,7 +17,6 @@ limitations under the License.
package controlplane
import (
"net"
"reflect"
"testing"
@ -28,6 +27,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/controlplane/reconcilers"
netutils "k8s.io/utils/net"
)
func TestReconcileEndpoints(t *testing.T) {
@ -401,7 +401,7 @@ func TestReconcileEndpoints(t *testing.T) {
}
epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil)
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter)
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true)
err := reconciler.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
@ -520,7 +520,7 @@ func TestReconcileEndpoints(t *testing.T) {
}
epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil)
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter)
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false)
err := reconciler.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
@ -585,7 +585,7 @@ func TestEmptySubsets(t *testing.T) {
endpointPorts := []corev1.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"},
}
err := reconciler.RemoveEndpoints("foo", net.ParseIP("1.2.3.4"), endpointPorts)
err := reconciler.RemoveEndpoints("foo", netutils.ParseIPSloppy("1.2.3.4"), endpointPorts)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -631,7 +631,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{}
fakeClient := fake.NewSimpleClientset()
master.ServiceClient = fakeClient.CoreV1()
master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false)
master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, false)
creates := []core.CreateAction{}
for _, action := range fakeClient.Actions() {
if action.GetVerb() == "create" {
@ -913,7 +913,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.CoreV1()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true)
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
@ -972,7 +972,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.CoreV1()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false)
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}

View File

@ -57,6 +57,7 @@ import (
certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
"k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert"
)
@ -72,7 +73,7 @@ func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertio
APIServerServicePort: 443,
MasterCount: 1,
EndpointReconcilerType: reconcilers.MasterCountReconcilerType,
ServiceIPRange: net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(24, 32)},
ServiceIPRange: net.IPNet{IP: netutils.ParseIPSloppy("10.0.0.0"), Mask: net.CIDRMask(24, 32)},
},
}
@ -101,7 +102,7 @@ func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertio
config.GenericConfig.Version = &kubeVersion
config.ExtraConfig.StorageFactory = storageFactory
config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
config.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
config.GenericConfig.PublicAddress = netutils.ParseIPSloppy("192.168.10.4")
config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api")
config.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}
config.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{

View File

@ -23,7 +23,6 @@ https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c
import (
"context"
"net"
"reflect"
"testing"
@ -31,6 +30,7 @@ import (
discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
netutils "k8s.io/utils/net"
)
type fakeLeases struct {
@ -459,7 +459,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true)
err := r.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, true)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
@ -560,7 +560,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
}
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false)
err := r.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, false)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}
@ -680,7 +680,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) {
}
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.RemoveEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts)
err := r.RemoveEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts)
if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err)
}

View File

@ -20,13 +20,14 @@ import (
"net"
utilnet "k8s.io/apimachinery/pkg/util/net"
netutils "k8s.io/utils/net"
)
// DefaultServiceNodePortRange is the default port range for NodePort services.
var DefaultServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
// DefaultServiceIPCIDR is a CIDR notation of IP range from which to allocate service cluster IPs
var DefaultServiceIPCIDR = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(24, 32)}
var DefaultServiceIPCIDR = net.IPNet{IP: netutils.ParseIPSloppy("10.0.0.0"), Mask: net.CIDRMask(24, 32)}
// DefaultEtcdPathPrefix is the default key prefix of etcd for API Server
const DefaultEtcdPathPrefix = "/registry"

View File

@ -18,16 +18,15 @@ limitations under the License.
package options
import (
"net"
genericoptions "k8s.io/apiserver/pkg/server/options"
netutils "k8s.io/utils/net"
)
// NewSecureServingOptions gives default values for the kube-apiserver which are not the options wanted by
// "normal" API servers running on the platform
func NewSecureServingOptions() *genericoptions.SecureServingOptionsWithLoopback {
o := genericoptions.SecureServingOptions{
BindAddress: net.ParseIP("0.0.0.0"),
BindAddress: netutils.ParseIPSloppy("0.0.0.0"),
BindPort: 6443,
Required: true,
ServerCert: genericoptions.GeneratableKeyCert{

View File

@ -35,6 +35,7 @@ import (
"k8s.io/component-base/metrics/legacyregistry"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/metrics"
netutils "k8s.io/utils/net"
)
// NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate
@ -159,13 +160,13 @@ func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string,
switch address.Type {
case v1.NodeHostName:
if ip := net.ParseIP(address.Address); ip != nil {
if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true
} else {
seenDNSNames[address.Address] = true
}
case v1.NodeExternalIP, v1.NodeInternalIP:
if ip := net.ParseIP(address.Address); ip != nil {
if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true
}
case v1.NodeExternalDNS, v1.NodeInternalDNS:
@ -177,7 +178,7 @@ func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string,
dnsNames = append(dnsNames, dnsName)
}
for ip := range seenIPs {
ips = append(ips, net.ParseIP(ip))
ips = append(ips, netutils.ParseIPSloppy(ip))
}
// return in stable order

View File

@ -21,7 +21,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
netutils "k8s.io/utils/net"
)
func TestAddressesToHostnamesAndIPs(t *testing.T) {
@ -62,7 +63,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeExternalIP, Address: "1.1.1.1"},
},
wantDNSNames: []string{"hostname"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1")},
wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1")},
},
{
name: "order values",
@ -75,7 +76,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeInternalIP, Address: "3.3.3.3"},
},
wantDNSNames: []string{"hostname-1", "hostname-2", "hostname-3"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2"), net.ParseIP("3.3.3.3")},
wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1"), netutils.ParseIPSloppy("2.2.2.2"), netutils.ParseIPSloppy("3.3.3.3")},
},
{
name: "handle IP and DNS hostnames",
@ -84,7 +85,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeHostName, Address: "1.1.1.1"},
},
wantDNSNames: []string{"hostname"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1")},
wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1")},
},
}
for _, tt := range tests {

View File

@ -21,12 +21,12 @@ package hostport
import (
"bytes"
"fmt"
"net"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/sets"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
netutils "k8s.io/utils/net"
)
type fakeChain struct {
@ -192,7 +192,7 @@ func normalizeRule(rule string) (string, error) {
arg := remaining[:end]
// Normalize un-prefixed IP addresses like iptables does
if net.ParseIP(arg) != nil {
if netutils.ParseIPSloppy(arg) != nil {
arg += "/32"
}

View File

@ -20,7 +20,6 @@ package hostport
import (
"bytes"
"net"
"strings"
"testing"
@ -28,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
)
func TestOpenCloseHostports(t *testing.T) {
@ -249,7 +249,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod1",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.2"),
IP: netutils.ParseIPSloppy("10.1.1.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -276,7 +276,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod2",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.3"),
IP: netutils.ParseIPSloppy("10.1.1.3"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -303,7 +303,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.4"),
IP: netutils.ParseIPSloppy("10.1.1.4"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -320,7 +320,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("192.168.12.12"),
IP: netutils.ParseIPSloppy("192.168.12.12"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -337,7 +337,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod4",
Namespace: "ns1",
IP: net.ParseIP("2001:beef::2"),
IP: netutils.ParseIPSloppy("2001:beef::2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -356,7 +356,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod5",
Namespace: "ns5",
IP: net.ParseIP("10.1.1.5"),
IP: netutils.ParseIPSloppy("10.1.1.5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -380,7 +380,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod6",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.2"),
IP: netutils.ParseIPSloppy("10.1.1.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -555,7 +555,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod1",
Namespace: "ns1",
IP: net.ParseIP("2001:beef::2"),
IP: netutils.ParseIPSloppy("2001:beef::2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -581,7 +581,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod2",
Namespace: "ns1",
IP: net.ParseIP("2001:beef::3"),
IP: netutils.ParseIPSloppy("2001:beef::3"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -607,7 +607,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("2001:beef::4"),
IP: netutils.ParseIPSloppy("2001:beef::4"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
@ -623,7 +623,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{
Name: "pod4",
Namespace: "ns2",
IP: net.ParseIP("192.168.2.2"),
IP: netutils.ParseIPSloppy("192.168.2.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{

View File

@ -259,7 +259,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
}
for idx, currentPodCIDR := range podCIDRs {
_, cidr, err := net.ParseCIDR(currentPodCIDR)
_, cidr, err := netutils.ParseCIDRSloppy(currentPodCIDR)
if nil != err {
klog.InfoS("Failed to generate CNI network config with cidr at the index", "podCIDR", currentPodCIDR, "index", idx, "err", err)
return
@ -451,7 +451,7 @@ func (plugin *kubenetNetworkPlugin) addPortMapping(id kubecontainer.ContainerID,
Namespace: namespace,
Name: name,
PortMappings: portMappings,
IP: net.ParseIP(ip),
IP: netutils.ParseIPSloppy(ip),
HostNetwork: false,
}
if netutils.IsIPv6(pm.IP) {
@ -635,7 +635,7 @@ func (plugin *kubenetNetworkPlugin) getNetworkStatus(id kubecontainer.ContainerI
ips := make([]net.IP, 0, len(iplist))
for _, ip := range iplist {
ips = append(ips, net.ParseIP(ip))
ips = append(ips, netutils.ParseIPSloppy(ip))
}
return &network.PodNetworkStatus{

View File

@ -40,6 +40,7 @@ import (
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
)
// test it fulfills the NetworkPlugin interface
@ -337,7 +338,7 @@ func TestGetRoutesConfig(t *testing.T) {
} {
var cidrs []*net.IPNet
for _, c := range test.cidrs {
_, cidr, err := net.ParseCIDR(c)
_, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err)
cidrs = append(cidrs, cidr)
}
@ -378,7 +379,7 @@ func TestGetRangesConfig(t *testing.T) {
} {
var cidrs []*net.IPNet
for _, c := range test.cidrs {
_, cidr, err := net.ParseCIDR(c)
_, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err)
cidrs = append(cidrs, cidr)
}

View File

@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature"
kubefeatures "k8s.io/kubernetes/pkg/features"
@ -248,7 +249,7 @@ func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceNam
if len(fields) < 4 {
return nil, fmt.Errorf("unexpected address output %s ", lines[0])
}
ip, _, err := net.ParseCIDR(fields[3])
ip, _, err := netutils.ParseCIDRSloppy(fields[3])
if err != nil {
return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
}

View File

@ -20,7 +20,6 @@ package testing
import (
"fmt"
"net"
"sync"
"testing"
@ -29,6 +28,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
netutils "k8s.io/utils/net"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@ -96,7 +96,7 @@ func TestPluginManager(t *testing.T) {
containerID := kubecontainer.ContainerID{ID: podName}
fnp.EXPECT().SetUpPod("", podName, containerID).Return(nil).Times(4)
fnp.EXPECT().GetPodNetworkStatus("", podName, containerID).Return(&network.PodNetworkStatus{IP: net.ParseIP("1.2.3.4")}, nil).Times(4)
fnp.EXPECT().GetPodNetworkStatus("", podName, containerID).Return(&network.PodNetworkStatus{IP: netutils.ParseIPSloppy("1.2.3.4")}, nil).Times(4)
fnp.EXPECT().TearDownPod("", podName, containerID).Return(nil).Times(4)
for x := 0; x < 4; x++ {
@ -173,7 +173,7 @@ func (p *hookableFakeNetworkPlugin) TearDownPod(string, string, kubecontainer.Co
}
func (p *hookableFakeNetworkPlugin) GetPodNetworkStatus(string, string, kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
return &network.PodNetworkStatus{IP: net.ParseIP("10.1.2.3")}, nil
return &network.PodNetworkStatus{IP: netutils.ParseIPSloppy("10.1.2.3")}, nil
}
func (p *hookableFakeNetworkPlugin) Status() error {

View File

@ -38,6 +38,7 @@ import (
libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns"
"k8s.io/mount-utils"
"k8s.io/utils/integer"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -505,7 +506,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS))
for _, ipEntry := range kubeCfg.ClusterDNS {
ip := net.ParseIP(ipEntry)
ip := netutils.ParseIPSloppy(ipEntry)
if ip == nil {
klog.InfoS("Invalid clusterDNS IP", "IP", ipEntry)
} else {

View File

@ -58,6 +58,7 @@ import (
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
taintutil "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/pkg/volume/util"
netutils "k8s.io/utils/net"
)
const (
@ -2484,7 +2485,7 @@ func TestValidateNodeIPParam(t *testing.T) {
tests = append(tests, successTest)
}
for _, test := range tests {
err := validateNodeIP(net.ParseIP(test.nodeIP))
err := validateNodeIP(netutils.ParseIPSloppy(test.nodeIP))
if test.success {
assert.NoError(t, err, "test %s", test.testName)
} else {

View File

@ -42,6 +42,7 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
netutils "k8s.io/utils/net"
// TODO: remove this import if
// api.Registry.GroupOrDie(v1.GroupName).GroupVersions[0].String() is changed
@ -3426,7 +3427,7 @@ func TestGenerateAPIPodStatusPodIPs(t *testing.T) {
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{net.ParseIP(tc.nodeIP)}
kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
}
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
@ -3530,7 +3531,7 @@ func TestSortPodIPs(t *testing.T) {
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{net.ParseIP(tc.nodeIP)}
kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
}
podIPs := kl.sortPodIPs(tc.podIPs)

View File

@ -18,7 +18,6 @@ package kuberuntime
import (
"fmt"
"net"
"net/url"
"runtime"
"sort"
@ -33,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
@ -298,7 +298,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
// pick primary IP
if len(podSandbox.Network.Ip) != 0 {
if net.ParseIP(podSandbox.Network.Ip) == nil {
if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
return nil
}
@ -307,7 +307,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
// pick additional ips, if cri reported them
for _, podIP := range podSandbox.Network.AdditionalIps {
if nil == net.ParseIP(podIP.Ip) {
if nil == netutils.ParseIPSloppy(podIP.Ip) {
klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
return nil
}

View File

@ -35,6 +35,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -350,7 +351,7 @@ func TestGetPodDNSType(t *testing.T) {
}
testClusterDNSDomain := "TEST"
clusterNS := "203.0.113.1"
testClusterDNS := []net.IP{net.ParseIP(clusterNS)}
testClusterDNS := []net.IP{netutils.ParseIPSloppy(clusterNS)}
configurer := NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
@ -477,7 +478,7 @@ func testGetPodDNS(t *testing.T) {
}
clusterNS := "203.0.113.1"
testClusterDNSDomain := "kubernetes.io"
testClusterDNS := []net.IP{net.ParseIP(clusterNS)}
testClusterDNS := []net.IP{netutils.ParseIPSloppy(clusterNS)}
configurer := NewConfigurer(recorder, nodeRef, nil, testClusterDNS, testClusterDNSDomain, "")
@ -606,7 +607,7 @@ func TestGetPodDNSCustom(t *testing.T) {
t.Fatal(err)
}
configurer := NewConfigurer(recorder, nodeRef, nil, []net.IP{net.ParseIP(testClusterNameserver)}, testClusterDNSDomain, tmpfile.Name())
configurer := NewConfigurer(recorder, nodeRef, nil, []net.IP{netutils.ParseIPSloppy(testClusterNameserver)}, testClusterDNSDomain, tmpfile.Name())
testCases := []struct {
desc string

View File

@ -42,6 +42,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/volume"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
@ -149,13 +150,13 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
// prefer addresses of the matching family
sortedAddresses := make([]v1.NodeAddress, 0, len(cloudNodeAddresses))
for _, nodeAddress := range cloudNodeAddresses {
ip := net.ParseIP(nodeAddress.Address)
ip := netutils.ParseIPSloppy(nodeAddress.Address)
if ip == nil || isPreferredIPFamily(ip) {
sortedAddresses = append(sortedAddresses, nodeAddress)
}
}
for _, nodeAddress := range cloudNodeAddresses {
ip := net.ParseIP(nodeAddress.Address)
ip := netutils.ParseIPSloppy(nodeAddress.Address)
if ip != nil && !isPreferredIPFamily(ip) {
sortedAddresses = append(sortedAddresses, nodeAddress)
}
@ -219,7 +220,7 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
// unless nodeIP is "::", in which case it is reversed.
if nodeIPSpecified {
ipAddr = nodeIP
} else if addr := net.ParseIP(hostname); addr != nil {
} else if addr := netutils.ParseIPSloppy(hostname); addr != nil {
ipAddr = addr
} else {
var addrs []net.IP

View File

@ -27,7 +27,7 @@ import (
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -66,7 +67,7 @@ func TestNodeAddress(t *testing.T) {
}{
{
name: "A single InternalIP",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
@ -79,7 +80,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "NodeIP is external",
nodeIP: net.ParseIP("55.55.55.55"),
nodeIP: netutils.ParseIPSloppy("55.55.55.55"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -95,7 +96,7 @@ func TestNodeAddress(t *testing.T) {
{
// Accommodating #45201 and #49202
name: "InternalIP and ExternalIP are the same",
nodeIP: net.ParseIP("55.55.55.55"),
nodeIP: netutils.ParseIPSloppy("55.55.55.55"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "44.44.44.44"},
{Type: v1.NodeExternalIP, Address: "44.44.44.44"},
@ -112,7 +113,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "An Internal/ExternalIP, an Internal/ExternalDNS",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -131,7 +132,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "An Internal with multiple internal IPs",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "10.2.2.2"},
@ -148,7 +149,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "An InternalIP that isn't valid: should error",
nodeIP: net.ParseIP("10.2.2.2"),
nodeIP: netutils.ParseIPSloppy("10.2.2.2"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -181,7 +182,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "cloud reports hostname, nodeIP is set, no override",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -211,7 +212,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "cloud provider is external",
nodeIP: net.ParseIP("10.0.0.1"),
nodeIP: netutils.ParseIPSloppy("10.0.0.1"),
nodeAddresses: []v1.NodeAddress{},
externalCloudProvider: true,
expectedAddresses: []v1.NodeAddress{
@ -250,7 +251,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -266,7 +267,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match with same type as nodeIP",
nodeIP: net.ParseIP("10.1.1.1"),
nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: testKubeletHostname}, // cloud-reported address value matches detected hostname
@ -323,7 +324,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "Dual-stack cloud, IPv4 first, request IPv4",
nodeIP: net.ParseIP("0.0.0.0"),
nodeIP: netutils.ParseIPSloppy("0.0.0.0"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
@ -338,7 +339,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "Dual-stack cloud, IPv6 first, request IPv4",
nodeIP: net.ParseIP("0.0.0.0"),
nodeIP: netutils.ParseIPSloppy("0.0.0.0"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
@ -353,7 +354,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "Dual-stack cloud, IPv4 first, request IPv6",
nodeIP: net.ParseIP("::"),
nodeIP: netutils.ParseIPSloppy("::"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
@ -368,7 +369,7 @@ func TestNodeAddress(t *testing.T) {
},
{
name: "Dual-stack cloud, IPv6 first, request IPv6",
nodeIP: net.ParseIP("::"),
nodeIP: netutils.ParseIPSloppy("::"),
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
@ -448,7 +449,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
}{
{
name: "Single --node-ip",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1")},
nodeIPs: []net.IP{netutils.ParseIPSloppy("10.1.1.1")},
expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
@ -456,7 +457,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
},
{
name: "Dual --node-ips",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1"), net.ParseIP("fd01::1234")},
nodeIPs: []net.IP{netutils.ParseIPSloppy("10.1.1.1"), netutils.ParseIPSloppy("fd01::1234")},
expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},

View File

@ -41,6 +41,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/utils/clock"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -144,7 +145,7 @@ func ListenAndServeKubeletServer(
tlsOptions *TLSOptions,
auth AuthInterface) {
address := net.ParseIP(kubeCfg.Address)
address := netutils.ParseIPSloppy(kubeCfg.Address)
port := uint(kubeCfg.Port)
klog.InfoS("Starting to listen", "address", address, "port", port)
handler := NewServer(host, resourceAnalyzer, auth, kubeCfg)

View File

@ -18,7 +18,6 @@ package kubemark
import (
"fmt"
"net"
"time"
v1 "k8s.io/api/core/v1"
@ -35,6 +34,7 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
"k8s.io/klog/v2"
@ -83,7 +83,7 @@ func NewHollowProxyOrDie(
nodeIP := utilnode.GetNodeIP(client, nodeName)
if nodeIP == nil {
klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1")
nodeIP = net.ParseIP("127.0.0.1")
nodeIP = netutils.ParseIPSloppy("127.0.0.1")
}
// Real proxier with fake iptables, sysctl, etc underneath it.
//var err error

View File

@ -18,7 +18,6 @@ package v1alpha1
import (
"fmt"
"net"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/kubelet/qos"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
@ -131,7 +131,7 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo
// based on the given bind address. IPv6 addresses are enclosed in square
// brackets for appending port.
func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) {
if net.ParseIP(bindAddress).To4() != nil {
if netutils.ParseIPSloppy(bindAddress).To4() != nil {
return "0.0.0.0", "127.0.0.1"
}
return "[::]", "[::1]"

View File

@ -66,7 +66,7 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0"))
}
if net.ParseIP(config.BindAddress) == nil {
if netutils.ParseIPSloppy(config.BindAddress) == nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address"))
}
@ -94,7 +94,7 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)"))
// if we are here means that len(cidrs) == 1, we need to validate it
default:
if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil {
if _, _, err := netutils.ParseCIDRSloppy(config.ClusterCIDR); err != nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)"))
}
}
@ -228,7 +228,7 @@ func validateHostPort(input string, fldPath *field.Path) field.ErrorList {
return allErrs
}
if ip := net.ParseIP(hostIP); ip == nil {
if ip := netutils.ParseIPSloppy(hostIP); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP"))
}
@ -275,7 +275,7 @@ func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field
allErrs := field.ErrorList{}
for i := range nodePortAddresses {
if _, _, err := net.ParseCIDR(nodePortAddresses[i]); err != nil {
if _, _, err := netutils.ParseCIDRSloppy(nodePortAddresses[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR"))
}
}
@ -305,7 +305,7 @@ func validateIPVSExcludeCIDRs(excludeCIDRs []string, fldPath *field.Path) field.
allErrs := field.ErrorList{}
for i := range excludeCIDRs {
if _, _, err := net.ParseCIDR(excludeCIDRs[i]); err != nil {
if _, _, err := netutils.ParseCIDRSloppy(excludeCIDRs[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), excludeCIDRs[i], "must be a valid CIDR"))
}
}

View File

@ -53,7 +53,7 @@ import (
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
const (
@ -189,7 +189,7 @@ type Proxier struct {
mu sync.Mutex // protects the following fields
serviceMap proxy.ServiceMap
endpointsMap proxy.EndpointsMap
portsMap map[utilnet.LocalPort]utilnet.Closeable
portsMap map[netutils.LocalPort]netutils.Closeable
nodeLabels map[string]string
// endpointSlicesSynced, and servicesSynced are set to true
// when corresponding objects are synced after startup. This is used to avoid
@ -208,7 +208,7 @@ type Proxier struct {
localDetector proxyutiliptables.LocalTrafficDetector
hostname string
nodeIP net.IP
portMapper utilnet.PortOpener
portMapper netutils.PortOpener
recorder events.EventRecorder
serviceHealthServer healthcheck.ServiceHealthServer
@ -295,7 +295,7 @@ func NewProxier(ipt utiliptables.Interface,
}
proxier := &Proxier{
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable),
portsMap: make(map[netutils.LocalPort]netutils.Closeable),
serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
endpointsMap: make(proxy.EndpointsMap),
@ -308,7 +308,7 @@ func NewProxier(ipt utiliptables.Interface,
localDetector: localDetector,
hostname: hostname,
nodeIP: nodeIP,
portMapper: &utilnet.ListenPortOpener,
portMapper: &netutils.ListenPortOpener,
recorder: recorder,
serviceHealthServer: serviceHealthServer,
healthzServer: healthzServer,
@ -966,7 +966,7 @@ func (proxier *Proxier) syncProxyRules() {
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[utilnet.LocalPort]utilnet.Closeable{}
replacementPortsMap := map[netutils.LocalPort]netutils.Closeable{}
// We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing:
@ -1006,10 +1006,10 @@ func (proxier *Proxier) syncProxyRules() {
klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
continue
}
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := utilnet.IPv4
isIPv6 := netutils.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := netutils.IPv4
if isIPv6 {
localPortIPFamily = utilnet.IPv6
localPortIPFamily = netutils.IPv6
}
protocol := strings.ToLower(string(svcInfo.Protocol()))
svcNameString := svcInfo.serviceNameString
@ -1082,13 +1082,13 @@ func (proxier *Proxier) syncProxyRules() {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) {
lp := utilnet.LocalPort{
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(netutils.ParseIPSloppy(externalIP)) {
lp := netutils.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
IPFamily: localPortIPFamily,
Port: svcInfo.Port(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
Protocol: netutils.Protocol(svcInfo.Protocol()),
}
if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
@ -1117,7 +1117,7 @@ func (proxier *Proxier) syncProxyRules() {
args = append(args[:0],
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()),
)
@ -1144,7 +1144,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT",
)
@ -1171,7 +1171,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()),
)
// jump to service firewall chain
@ -1199,7 +1199,7 @@ func (proxier *Proxier) syncProxyRules() {
allowFromNode := false
for _, src := range svcInfo.LoadBalancerSourceRanges() {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
_, cidr, err := net.ParseCIDR(src)
_, cidr, err := netutils.ParseCIDRSloppy(src)
if err != nil {
klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
} else if cidr.Contains(proxier.nodeIP) {
@ -1210,7 +1210,7 @@ func (proxier *Proxier) syncProxyRules() {
// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
// Need to add the following rule to allow request on host.
if allowFromNode {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress)), "-j", string(chosenChain))...)
utilproxy.WriteLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)), "-j", string(chosenChain))...)
}
}
@ -1223,7 +1223,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT",
)
@ -1241,14 +1241,14 @@ func (proxier *Proxier) syncProxyRules() {
continue
}
lps := make([]utilnet.LocalPort, 0)
lps := make([]netutils.LocalPort, 0)
for address := range nodeAddresses {
lp := utilnet.LocalPort{
lp := netutils.LocalPort{
Description: "nodePort for " + svcNameString,
IP: address,
IPFamily: localPortIPFamily,
Port: svcInfo.NodePort(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
Protocol: netutils.Protocol(svcInfo.Protocol()),
}
if utilproxy.IsZeroCIDR(address) {
// Empty IP address means all
@ -1441,7 +1441,7 @@ func (proxier *Proxier) syncProxyRules() {
args = proxier.appendServiceCommentLocked(args, svcNameString)
// Handle traffic that loops back to the originator with SNAT.
utilproxy.WriteLine(proxier.natRules, append(args,
"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
"-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(epIP)),
"-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
@ -1564,7 +1564,7 @@ func (proxier *Proxier) syncProxyRules() {
break
}
// Ignore IP addresses with incorrect version
if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) {
if isIPv6 && !netutils.IsIPv6String(address) || !isIPv6 && netutils.IsIPv6String(address) {
klog.ErrorS(nil, "IP has incorrect IP version", "ip", address)
continue
}

View File

@ -51,7 +51,7 @@ import (
iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
@ -285,7 +285,7 @@ func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
var expExecs int
if conntrack.IsClearConntrackNeeded(tc.protocol) {
isIPv6 := func(ip string) bool {
netIP := net.ParseIP(ip)
netIP := netutils.ParseIPSloppy(ip)
return netIP.To4() == nil
}
endpointIP := utilproxy.IPPart(tc.endpoint)
@ -428,7 +428,7 @@ func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
var expExecs int
if conntrack.IsClearConntrackNeeded(tc.protocol) {
isIPv6 := func(ip string) bool {
netIP := net.ParseIP(ip)
netIP := netutils.ParseIPSloppy(ip)
return netIP.To4() == nil
}
endpointIP := utilproxy.IPPart(tc.endpoint)
@ -471,12 +471,12 @@ func (f *fakeCloseable) Close() error {
// fakePortOpener implements portOpener.
type fakePortOpener struct {
openPorts []*utilnet.LocalPort
openPorts []*netutils.LocalPort
}
// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) {
func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
f.openPorts = append(f.openPorts, lp)
return &fakeCloseable{}, nil
}
@ -501,8 +501,8 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
masqueradeMark: "0x4000",
localDetector: detectLocal,
hostname: testHostname,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable),
portMapper: &fakePortOpener{[]*utilnet.LocalPort{}},
portsMap: make(map[netutils.LocalPort]netutils.Closeable),
portMapper: &fakePortOpener{[]*netutils.LocalPort{}},
serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
precomputedProbabilities: make([]string, 0, 1001),
iptablesData: bytes.NewBuffer(nil),
@ -1123,9 +1123,9 @@ func TestNodePort(t *testing.T) {
)
itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("::1/128"), Mask: net.CIDRMask(128, 128)}}
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1/128"), Mask: net.CIDRMask(128, 128)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{}
@ -1175,9 +1175,9 @@ func TestHealthCheckNodePort(t *testing.T) {
)
itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"127.0.0.1/16"}
@ -1615,7 +1615,7 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable
)
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}
addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.nodePortAddresses = []string{"10.20.30.0/24"}

View File

@ -17,12 +17,12 @@ limitations under the License.
package ipvs
import (
"net"
"reflect"
"testing"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
utilipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing"
netutils "k8s.io/utils/net"
)
func Test_GracefulDeleteRS(t *testing.T) {
@ -37,12 +37,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{
name: "graceful delete, no connections results in deleting the real server immediatetly",
vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,
@ -55,7 +55,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -67,7 +67,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,
@ -83,7 +83,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -101,12 +101,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{
name: "graceful delete, real server has active connections, weight should be 0 but don't delete",
vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 10,
@ -119,7 +119,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -131,7 +131,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 10,
@ -147,7 +147,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -159,7 +159,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 0,
ActiveConn: 10,
@ -173,12 +173,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{
name: "graceful delete, real server has in-active connections, weight should be 0 but don't delete",
vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,
@ -191,7 +191,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -203,7 +203,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,
@ -219,7 +219,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -231,7 +231,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 0,
ActiveConn: 0,
@ -245,12 +245,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{
name: "graceful delete, real server has connections, but udp connections are deleted immediately",
vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp",
Port: uint16(80),
},
rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 10,
@ -263,7 +263,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "udp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp",
Port: uint16(80),
},
@ -275,7 +275,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "udp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 10,
@ -291,7 +291,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "udp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp",
Port: uint16(80),
},
@ -309,12 +309,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{
name: "graceful delete, real server mismatch should be no-op",
vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(81), // port mismatched
Weight: 100,
ActiveConn: 0,
@ -327,7 +327,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -339,7 +339,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,
@ -355,7 +355,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80,
Protocol: "tcp",
}: {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp",
Port: uint16(80),
},
@ -367,7 +367,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp",
}: {
{
Address: net.ParseIP("10.0.0.1"),
Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80),
Weight: 100,
ActiveConn: 0,

View File

@ -20,9 +20,9 @@ package ipvs
import (
"fmt"
"net"
"k8s.io/apimachinery/pkg/util/sets"
netutils "k8s.io/utils/net"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
@ -44,7 +44,7 @@ func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool,
if err != nil {
return false, fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addr := net.ParseIP(address)
addr := netutils.ParseIPSloppy(address)
if addr == nil {
return false, fmt.Errorf("error parse ip address: %s", address)
}
@ -64,7 +64,7 @@ func (h *netlinkHandle) UnbindAddress(address, devName string) error {
if err != nil {
return fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addr := net.ParseIP(address)
addr := netutils.ParseIPSloppy(address)
if addr == nil {
return fmt.Errorf("error parse ip address: %s", address)
}

View File

@ -34,7 +34,7 @@ import (
"k8s.io/klog/v2"
utilexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
@ -222,7 +222,7 @@ type Proxier struct {
mu sync.Mutex // protects the following fields
serviceMap proxy.ServiceMap
endpointsMap proxy.EndpointsMap
portsMap map[utilnet.LocalPort]utilnet.Closeable
portsMap map[netutils.LocalPort]netutils.Closeable
nodeLabels map[string]string
// endpointSlicesSynced, and servicesSynced are set to true when
// corresponding objects are synced after startup. This is used to avoid updating
@ -248,7 +248,7 @@ type Proxier struct {
localDetector proxyutiliptables.LocalTrafficDetector
hostname string
nodeIP net.IP
portMapper utilnet.PortOpener
portMapper netutils.PortOpener
recorder events.EventRecorder
serviceHealthServer healthcheck.ServiceHealthServer
@ -312,7 +312,7 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
}
// translate ip string to IP
for _, ipStr := range nodeAddress.UnsortedList() {
a := net.ParseIP(ipStr)
a := netutils.ParseIPSloppy(ipStr)
if a.IsLoopback() {
continue
}
@ -451,11 +451,11 @@ func NewProxier(ipt utiliptables.Interface,
}
// excludeCIDRs has been validated before, here we just parse it to IPNet list
parsedExcludeCIDRs, _ := utilnet.ParseCIDRs(excludeCIDRs)
parsedExcludeCIDRs, _ := netutils.ParseCIDRs(excludeCIDRs)
proxier := &Proxier{
ipFamily: ipFamily,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable),
portsMap: make(map[netutils.LocalPort]netutils.Closeable),
serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
endpointsMap: make(proxy.EndpointsMap),
@ -470,7 +470,7 @@ func NewProxier(ipt utiliptables.Interface,
localDetector: localDetector,
hostname: hostname,
nodeIP: nodeIP,
portMapper: &utilnet.ListenPortOpener,
portMapper: &netutils.ListenPortOpener,
recorder: recorder,
serviceHealthServer: serviceHealthServer,
healthzServer: healthzServer,
@ -558,7 +558,7 @@ func NewDualStackProxier(
func filterCIDRs(wantIPv6 bool, cidrs []string) []string {
var filteredCIDRs []string
for _, cidr := range cidrs {
if utilnet.IsIPv6CIDRString(cidr) == wantIPv6 {
if netutils.IsIPv6CIDRString(cidr) == wantIPv6 {
filteredCIDRs = append(filteredCIDRs, cidr)
}
}
@ -1077,7 +1077,7 @@ func (proxier *Proxier) syncProxyRules() {
}
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[utilnet.LocalPort]utilnet.Closeable{}
replacementPortsMap := map[netutils.LocalPort]netutils.Closeable{}
// activeIPVSServices represents IPVS service successfully created in this round of sync
activeIPVSServices := map[string]bool{}
// currentIPVSServices represent IPVS services listed from the system
@ -1115,7 +1115,7 @@ func (proxier *Proxier) syncProxyRules() {
} else {
nodeAddresses = nodeAddrSet.List()
for _, address := range nodeAddresses {
a := net.ParseIP(address)
a := netutils.ParseIPSloppy(address)
if a.IsLoopback() {
continue
}
@ -1134,7 +1134,7 @@ func (proxier *Proxier) syncProxyRules() {
// filter node IPs by proxier ipfamily
idx := 0
for _, nodeIP := range nodeIPs {
if (proxier.ipFamily == v1.IPv6Protocol) == utilnet.IsIPv6(nodeIP) {
if (proxier.ipFamily == v1.IPv6Protocol) == netutils.IsIPv6(nodeIP) {
nodeIPs[idx] = nodeIP
idx++
}
@ -1151,10 +1151,10 @@ func (proxier *Proxier) syncProxyRules() {
klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
continue
}
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := utilnet.IPv4
isIPv6 := netutils.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := netutils.IPv4
if isIPv6 {
localPortIPFamily = utilnet.IPv6
localPortIPFamily = netutils.IPv6
}
protocol := strings.ToLower(string(svcInfo.Protocol()))
// Precompute svcNameString; with many services the many calls
@ -1240,14 +1240,14 @@ func (proxier *Proxier) syncProxyRules() {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) {
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(netutils.ParseIPSloppy(externalIP)) {
// We do not start listening on SCTP ports, according to our agreement in the SCTP support KEP
lp := utilnet.LocalPort{
lp := netutils.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
IPFamily: localPortIPFamily,
Port: svcInfo.Port(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
Protocol: netutils.Protocol(svcInfo.Protocol()),
}
if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
@ -1297,7 +1297,7 @@ func (proxier *Proxier) syncProxyRules() {
// ipvs call
serv := &utilipvs.VirtualServer{
Address: net.ParseIP(externalIP),
Address: netutils.ParseIPSloppy(externalIP),
Port: uint16(svcInfo.Port()),
Protocol: string(svcInfo.Protocol()),
Scheduler: proxier.ipvsScheduler,
@ -1372,7 +1372,7 @@ func (proxier *Proxier) syncProxyRules() {
proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())
// ignore error because it has been validated
_, cidr, _ := net.ParseCIDR(src)
_, cidr, _ := netutils.ParseCIDRSloppy(src)
if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
@ -1399,7 +1399,7 @@ func (proxier *Proxier) syncProxyRules() {
// ipvs call
serv := &utilipvs.VirtualServer{
Address: net.ParseIP(ingress),
Address: netutils.ParseIPSloppy(ingress),
Port: uint16(svcInfo.Port()),
Protocol: string(svcInfo.Protocol()),
Scheduler: proxier.ipvsScheduler,
@ -1427,14 +1427,14 @@ func (proxier *Proxier) syncProxyRules() {
continue
}
var lps []utilnet.LocalPort
var lps []netutils.LocalPort
for _, address := range nodeAddresses {
lp := utilnet.LocalPort{
lp := netutils.LocalPort{
Description: "nodePort for " + svcNameString,
IP: address,
IPFamily: localPortIPFamily,
Port: svcInfo.NodePort(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
Protocol: netutils.Protocol(svcInfo.Protocol()),
}
if utilproxy.IsZeroCIDR(address) {
// Empty IP address means all
@ -1470,7 +1470,7 @@ func (proxier *Proxier) syncProxyRules() {
}
klog.V(2).InfoS("Opened local port", "port", lp.String())
if lp.Protocol == utilnet.UDP {
if lp.Protocol == netutils.UDP {
conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
}
replacementPortsMap[lp] = socket
@ -2111,7 +2111,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
}
newDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Address: netutils.ParseIPSloppy(ip),
Port: uint16(portNum),
Weight: 1,
}
@ -2154,7 +2154,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
}
delDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Address: netutils.ParseIPSloppy(ip),
Port: uint16(portNum),
}
@ -2169,13 +2169,13 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
}
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) {
isIPv6 := utilnet.IsIPv6(proxier.nodeIP)
isIPv6 := netutils.IsIPv6(proxier.nodeIP)
for cs := range currentServices {
svc := currentServices[cs]
if proxier.isIPInExcludeCIDRs(svc.Address) {
continue
}
if utilnet.IsIPv6(svc.Address) != isIPv6 {
if netutils.IsIPv6(svc.Address) != isIPv6 {
// Not our family
continue
}
@ -2210,9 +2210,9 @@ func (proxier *Proxier) isIPInExcludeCIDRs(ip net.IP) bool {
func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool {
legacyAddrs := make(map[string]bool)
isIPv6 := utilnet.IsIPv6(proxier.nodeIP)
isIPv6 := netutils.IsIPv6(proxier.nodeIP)
for _, addr := range currentBindAddrs {
addrIsIPv6 := utilnet.IsIPv6(net.ParseIP(addr))
addrIsIPv6 := netutils.IsIPv6(netutils.ParseIPSloppy(addr))
if addrIsIPv6 && !isIPv6 || !addrIsIPv6 && isIPv6 {
continue
}

View File

@ -50,9 +50,8 @@ import (
ipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
utilnet "k8s.io/utils/net"
)
const testHostname = "test-hostname"
@ -72,12 +71,12 @@ func (f *fakeIPGetter) BindedIPs() (sets.String, error) {
// fakePortOpener implements portOpener.
type fakePortOpener struct {
openPorts []*utilnet.LocalPort
openPorts []*netutils.LocalPort
}
// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) {
func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
f.openPorts = append(f.openPorts, lp)
return nil, nil
}
@ -113,7 +112,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
// filter node IPs by proxier ipfamily
idx := 0
for _, nodeIP := range nodeIPs {
if (ipFamily == v1.IPv6Protocol) == utilnet.IsIPv6(nodeIP) {
if (ipFamily == v1.IPv6Protocol) == netutils.IsIPv6(nodeIP) {
nodeIPs[idx] = nodeIP
idx++
}
@ -153,8 +152,8 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
strictARP: false,
localDetector: proxyutiliptables.NewNoOpLocalDetector(),
hostname: testHostname,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable),
portMapper: &fakePortOpener{[]*utilnet.LocalPort{}},
portsMap: make(map[netutils.LocalPort]netutils.Closeable),
portMapper: &fakePortOpener{[]*netutils.LocalPort{}},
serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
ipvsScheduler: DefaultScheduler,
ipGetter: &fakeIPGetter{nodeIPs: nodeIPs},
@ -513,8 +512,8 @@ func TestNodePortIPv4(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
net.ParseIP("2001:db8::1:1"),
netutils.ParseIPSloppy("100.101.102.103"),
netutils.ParseIPSloppy("2001:db8::1:1"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -524,7 +523,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -534,7 +533,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "TCP",
}: {
Address: net.ParseIP("100.101.102.103"),
Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "TCP",
Port: uint16(3001),
Scheduler: "rr",
@ -547,7 +546,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -558,7 +557,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -594,7 +593,7 @@ func TestNodePortIPv4(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
netutils.ParseIPSloppy("100.101.102.103"),
},
nodePortAddresses: []string{"0.0.0.0/0"},
expectedIPVS: &ipvstest.FakeIPVS{
@ -604,7 +603,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80,
Protocol: "UDP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "UDP",
Port: uint16(80),
Scheduler: "rr",
@ -614,7 +613,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "UDP",
}: {
Address: net.ParseIP("100.101.102.103"),
Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "UDP",
Port: uint16(3001),
Scheduler: "rr",
@ -627,7 +626,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "UDP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -638,7 +637,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "UDP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -677,7 +676,7 @@ func TestNodePortIPv4(t *testing.T) {
},
endpoints: []*discovery.EndpointSlice{},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
netutils.ParseIPSloppy("100.101.102.103"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -687,7 +686,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -697,7 +696,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "TCP",
}: {
Address: net.ParseIP("100.101.102.103"),
Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "TCP",
Port: uint16(3001),
Scheduler: "rr",
@ -745,12 +744,12 @@ func TestNodePortIPv4(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
net.ParseIP("100.101.102.104"),
net.ParseIP("100.101.102.105"),
net.ParseIP("2001:db8::1:1"),
net.ParseIP("2001:db8::1:2"),
net.ParseIP("2001:db8::1:3"),
netutils.ParseIPSloppy("100.101.102.103"),
netutils.ParseIPSloppy("100.101.102.104"),
netutils.ParseIPSloppy("100.101.102.105"),
netutils.ParseIPSloppy("2001:db8::1:1"),
netutils.ParseIPSloppy("2001:db8::1:2"),
netutils.ParseIPSloppy("2001:db8::1:3"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -760,7 +759,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80,
Protocol: "SCTP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "SCTP",
Port: uint16(80),
Scheduler: "rr",
@ -770,7 +769,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "SCTP",
}: {
Address: net.ParseIP("100.101.102.103"),
Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "SCTP",
Port: uint16(3001),
Scheduler: "rr",
@ -780,7 +779,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "SCTP",
}: {
Address: net.ParseIP("100.101.102.104"),
Address: netutils.ParseIPSloppy("100.101.102.104"),
Protocol: "SCTP",
Port: uint16(3001),
Scheduler: "rr",
@ -790,7 +789,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001,
Protocol: "SCTP",
}: {
Address: net.ParseIP("100.101.102.105"),
Address: netutils.ParseIPSloppy("100.101.102.105"),
Protocol: "SCTP",
Port: uint16(3001),
Scheduler: "rr",
@ -803,7 +802,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -814,7 +813,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -825,7 +824,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -836,7 +835,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -951,8 +950,8 @@ func TestNodePortIPv6(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
net.ParseIP("2001:db8::1:1"),
netutils.ParseIPSloppy("100.101.102.103"),
netutils.ParseIPSloppy("2001:db8::1:1"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -962,7 +961,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001,
Protocol: "TCP",
}: {
Address: net.ParseIP("2001:db8::1:1"),
Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "TCP",
Port: uint16(3001),
Scheduler: "rr",
@ -972,7 +971,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("2020::1"),
Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -985,7 +984,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("1002:ab8::2:10"),
Address: netutils.ParseIPSloppy("1002:ab8::2:10"),
Port: uint16(80),
Weight: 1,
},
@ -997,7 +996,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("1002:ab8::2:10"),
Address: netutils.ParseIPSloppy("1002:ab8::2:10"),
Port: uint16(80),
Weight: 1,
},
@ -1034,7 +1033,7 @@ func TestNodePortIPv6(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
netutils.ParseIPSloppy("100.101.102.103"),
},
nodePortAddresses: []string{"0.0.0.0/0"},
/*since this is a node with only IPv4, proxier should not do anything */
@ -1062,8 +1061,8 @@ func TestNodePortIPv6(t *testing.T) {
},
endpoints: []*discovery.EndpointSlice{},
nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"),
net.ParseIP("2001:db8::1:1"),
netutils.ParseIPSloppy("100.101.102.103"),
netutils.ParseIPSloppy("2001:db8::1:1"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -1073,7 +1072,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001,
Protocol: "TCP",
}: {
Address: net.ParseIP("2001:db8::1:1"),
Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "TCP",
Port: uint16(3001),
Scheduler: "rr",
@ -1083,7 +1082,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("2020::1"),
Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -1132,8 +1131,8 @@ func TestNodePortIPv6(t *testing.T) {
}),
},
nodeIPs: []net.IP{
net.ParseIP("2001:db8::1:1"),
net.ParseIP("2001:db8::1:2"),
netutils.ParseIPSloppy("2001:db8::1:1"),
netutils.ParseIPSloppy("2001:db8::1:2"),
},
nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{
@ -1143,7 +1142,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001,
Protocol: "SCTP",
}: {
Address: net.ParseIP("2001:db8::1:1"),
Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "SCTP",
Port: uint16(3001),
Scheduler: "rr",
@ -1153,7 +1152,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001,
Protocol: "SCTP",
}: {
Address: net.ParseIP("2001:db8::1:2"),
Address: netutils.ParseIPSloppy("2001:db8::1:2"),
Protocol: "SCTP",
Port: uint16(3001),
Scheduler: "rr",
@ -1163,7 +1162,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80,
Protocol: "SCTP",
}: {
Address: net.ParseIP("2020::1"),
Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "SCTP",
Port: uint16(80),
Scheduler: "rr",
@ -1176,7 +1175,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("2001::1"),
Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80),
Weight: 1,
},
@ -1187,7 +1186,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("2001::1"),
Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80),
Weight: 1,
},
@ -1198,7 +1197,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP",
}: {
{
Address: net.ParseIP("2001::1"),
Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80),
Weight: 1,
},
@ -1313,7 +1312,7 @@ func TestIPv4Proxier(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -1326,7 +1325,7 @@ func TestIPv4Proxier(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80),
Weight: 1,
},
@ -1354,7 +1353,7 @@ func TestIPv4Proxier(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("10.20.30.41"),
Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -1451,7 +1450,7 @@ func TestIPv6Proxier(t *testing.T) {
Port: 8080,
Protocol: "TCP",
}: {
Address: net.ParseIP("1002:ab8::2:1"),
Address: netutils.ParseIPSloppy("1002:ab8::2:1"),
Protocol: "TCP",
Port: uint16(8080),
Scheduler: "rr",
@ -1464,7 +1463,7 @@ func TestIPv6Proxier(t *testing.T) {
Protocol: "TCP",
}: {
{
Address: net.ParseIP("1009:ab8::5:6"),
Address: netutils.ParseIPSloppy("1009:ab8::5:6"),
Port: uint16(8080),
Weight: 1,
},
@ -1492,7 +1491,7 @@ func TestIPv6Proxier(t *testing.T) {
Port: 80,
Protocol: "TCP",
}: {
Address: net.ParseIP("2001::1"),
Address: netutils.ParseIPSloppy("2001::1"),
Protocol: "TCP",
Port: uint16(80),
Scheduler: "rr",
@ -1832,7 +1831,7 @@ func TestLoadBalancer(t *testing.T) {
}
func TestOnlyLocalNodePorts(t *testing.T) {
nodeIP := net.ParseIP("100.101.102.103")
nodeIP := netutils.ParseIPSloppy("100.101.102.103")
ipt, fp := buildFakeProxier()
svcIP := "10.20.30.41"
@ -1882,9 +1881,9 @@ func TestOnlyLocalNodePorts(t *testing.T) {
)
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"}
@ -1962,9 +1961,9 @@ func TestHealthCheckNodePort(t *testing.T) {
)
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"}
@ -2528,7 +2527,7 @@ func TestSessionAffinity(t *testing.T) {
ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
nodeIP := net.ParseIP("100.101.102.103")
nodeIP := netutils.ParseIPSloppy("100.101.102.103")
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41"
svcPort := 80
@ -3432,7 +3431,7 @@ func Test_syncService(t *testing.T) {
{
// case 0, old virtual server is same as new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 80,
Scheduler: "rr",
@ -3440,7 +3439,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "foo",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 80,
Scheduler: "rr",
@ -3452,7 +3451,7 @@ func Test_syncService(t *testing.T) {
{
// case 1, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 8080,
Scheduler: "rr",
@ -3460,7 +3459,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 8080,
Scheduler: "rr",
@ -3472,7 +3471,7 @@ func Test_syncService(t *testing.T) {
{
// case 2, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 8080,
Scheduler: "rr",
@ -3480,7 +3479,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP),
Port: 8080,
Scheduler: "wlc",
@ -3494,7 +3493,7 @@ func Test_syncService(t *testing.T) {
oldVirtualServer: nil,
svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolUDP),
Port: 53,
Scheduler: "rr",
@ -3506,7 +3505,7 @@ func Test_syncService(t *testing.T) {
{
// case 4, SCTP, old virtual server is same as new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 80,
Scheduler: "rr",
@ -3514,7 +3513,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "foo",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 80,
Scheduler: "rr",
@ -3526,7 +3525,7 @@ func Test_syncService(t *testing.T) {
{
// case 5, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 8080,
Scheduler: "rr",
@ -3534,7 +3533,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 8080,
Scheduler: "rr",
@ -3546,7 +3545,7 @@ func Test_syncService(t *testing.T) {
{
// case 6, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 8080,
Scheduler: "rr",
@ -3554,7 +3553,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 8080,
Scheduler: "wlc",
@ -3568,7 +3567,7 @@ func Test_syncService(t *testing.T) {
oldVirtualServer: nil,
svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 53,
Scheduler: "rr",
@ -3580,7 +3579,7 @@ func Test_syncService(t *testing.T) {
{
// case 8, virtual server address already binded, skip sync
oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 53,
Scheduler: "rr",
@ -3588,7 +3587,7 @@ func Test_syncService(t *testing.T) {
},
svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP),
Port: 53,
Scheduler: "rr",
@ -3720,7 +3719,7 @@ func TestCleanLegacyService(t *testing.T) {
ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"})
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
// All ipvs services that were processed in the latest sync loop.
@ -3729,7 +3728,7 @@ func TestCleanLegacyService(t *testing.T) {
currentServices := map[string]*utilipvs.VirtualServer{
// Created by kube-proxy.
"ipvs0": {
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP),
Port: 53,
Scheduler: "rr",
@ -3737,7 +3736,7 @@ func TestCleanLegacyService(t *testing.T) {
},
// Created by kube-proxy.
"ipvs1": {
Address: net.ParseIP("2.2.2.2"),
Address: netutils.ParseIPSloppy("2.2.2.2"),
Protocol: string(v1.ProtocolUDP),
Port: 54,
Scheduler: "rr",
@ -3745,7 +3744,7 @@ func TestCleanLegacyService(t *testing.T) {
},
// Created by an external party.
"ipvs2": {
Address: net.ParseIP("3.3.3.3"),
Address: netutils.ParseIPSloppy("3.3.3.3"),
Protocol: string(v1.ProtocolUDP),
Port: 55,
Scheduler: "rr",
@ -3753,7 +3752,7 @@ func TestCleanLegacyService(t *testing.T) {
},
// Created by an external party.
"ipvs3": {
Address: net.ParseIP("4.4.4.4"),
Address: netutils.ParseIPSloppy("4.4.4.4"),
Protocol: string(v1.ProtocolUDP),
Port: 56,
Scheduler: "rr",
@ -3761,7 +3760,7 @@ func TestCleanLegacyService(t *testing.T) {
},
// Created by an external party.
"ipvs4": {
Address: net.ParseIP("5.5.5.5"),
Address: netutils.ParseIPSloppy("5.5.5.5"),
Protocol: string(v1.ProtocolUDP),
Port: 57,
Scheduler: "rr",
@ -3769,7 +3768,7 @@ func TestCleanLegacyService(t *testing.T) {
},
// Created by kube-proxy, but now stale.
"ipvs5": {
Address: net.ParseIP("6.6.6.6"),
Address: netutils.ParseIPSloppy("6.6.6.6"),
Protocol: string(v1.ProtocolUDP),
Port: 58,
Scheduler: "rr",
@ -3812,7 +3811,7 @@ func TestCleanLegacyService(t *testing.T) {
// check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are bound, ignore ipv6 addresses
remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs {
if net.ParseIP(a).To4() == nil {
if netutils.ParseIPSloppy(a).To4() == nil {
continue
}
remainingAddrsMap[a] = true
@ -3834,21 +3833,21 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// All ipvs services in the system.
currentServices := map[string]*utilipvs.VirtualServer{
"ipvs0": { // deleted with real servers
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP),
Port: 53,
Scheduler: "rr",
Flags: utilipvs.FlagHashed,
},
"ipvs1": { // deleted no real server
Address: net.ParseIP("2.2.2.2"),
Address: netutils.ParseIPSloppy("2.2.2.2"),
Protocol: string(v1.ProtocolUDP),
Port: 54,
Scheduler: "rr",
Flags: utilipvs.FlagHashed,
},
"ipvs2": { // not deleted
Address: net.ParseIP("3.3.3.3"),
Address: netutils.ParseIPSloppy("3.3.3.3"),
Protocol: string(v1.ProtocolUDP),
Port: 54,
Scheduler: "rr",
@ -3859,13 +3858,13 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// "ipvs0" has a real server, but it should still be deleted since the Service is deleted
realServers := map[*utilipvs.VirtualServer]*utilipvs.RealServer{
{
Address: net.ParseIP("1.1.1.1"),
Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP),
Port: 53,
Scheduler: "rr",
Flags: utilipvs.FlagHashed,
}: {
Address: net.ParseIP("10.180.0.1"),
Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(53),
Weight: 1,
},
@ -3905,7 +3904,7 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// check that address is "3.3.3.3"
remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs {
if net.ParseIP(a).To4() == nil {
if netutils.ParseIPSloppy(a).To4() == nil {
continue
}
remainingAddrsMap[a] = true
@ -3921,12 +3920,12 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
gtm := NewGracefulTerminationManager(ipvs)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"4.4.4.4/32"})
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"4.4.4.4/32"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
fp.gracefuldeleteManager = gtm
vs := &utilipvs.VirtualServer{
Address: net.ParseIP("4.4.4.4"),
Address: netutils.ParseIPSloppy("4.4.4.4"),
Protocol: string(v1.ProtocolUDP),
Port: 56,
Scheduler: "rr",
@ -3937,13 +3936,13 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
rss := []*utilipvs.RealServer{
{
Address: net.ParseIP("10.10.10.10"),
Address: netutils.ParseIPSloppy("10.10.10.10"),
Port: 56,
ActiveConn: 0,
InactiveConn: 0,
},
{
Address: net.ParseIP("11.11.11.11"),
Address: netutils.ParseIPSloppy("11.11.11.11"),
Port: 56,
ActiveConn: 0,
InactiveConn: 0,
@ -3976,9 +3975,9 @@ func TestCleanLegacyService6(t *testing.T) {
ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"3000::/64", "4000::/64"})
excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3000::/64", "4000::/64"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
fp.nodeIP = net.ParseIP("::1")
fp.nodeIP = netutils.ParseIPSloppy("::1")
// All ipvs services that were processed in the latest sync loop.
activeServices := map[string]bool{"ipvs0": true, "ipvs1": true}
@ -3986,7 +3985,7 @@ func TestCleanLegacyService6(t *testing.T) {
currentServices := map[string]*utilipvs.VirtualServer{
// Created by kube-proxy.
"ipvs0": {
Address: net.ParseIP("1000::1"),
Address: netutils.ParseIPSloppy("1000::1"),
Protocol: string(v1.ProtocolUDP),
Port: 53,
Scheduler: "rr",
@ -3994,7 +3993,7 @@ func TestCleanLegacyService6(t *testing.T) {
},
// Created by kube-proxy.
"ipvs1": {
Address: net.ParseIP("1000::2"),
Address: netutils.ParseIPSloppy("1000::2"),
Protocol: string(v1.ProtocolUDP),
Port: 54,
Scheduler: "rr",
@ -4002,7 +4001,7 @@ func TestCleanLegacyService6(t *testing.T) {
},
// Created by an external party.
"ipvs2": {
Address: net.ParseIP("3000::1"),
Address: netutils.ParseIPSloppy("3000::1"),
Protocol: string(v1.ProtocolUDP),
Port: 55,
Scheduler: "rr",
@ -4010,7 +4009,7 @@ func TestCleanLegacyService6(t *testing.T) {
},
// Created by an external party.
"ipvs3": {
Address: net.ParseIP("4000::1"),
Address: netutils.ParseIPSloppy("4000::1"),
Protocol: string(v1.ProtocolUDP),
Port: 56,
Scheduler: "rr",
@ -4018,7 +4017,7 @@ func TestCleanLegacyService6(t *testing.T) {
},
// Created by an external party.
"ipvs4": {
Address: net.ParseIP("5000::1"),
Address: netutils.ParseIPSloppy("5000::1"),
Protocol: string(v1.ProtocolUDP),
Port: 57,
Scheduler: "rr",
@ -4026,7 +4025,7 @@ func TestCleanLegacyService6(t *testing.T) {
},
// Created by kube-proxy, but now stale.
"ipvs5": {
Address: net.ParseIP("1000::6"),
Address: netutils.ParseIPSloppy("1000::6"),
Protocol: string(v1.ProtocolUDP),
Port: 58,
Scheduler: "rr",
@ -4069,7 +4068,7 @@ func TestCleanLegacyService6(t *testing.T) {
// check that address "1000::1", "1000::2", "3000::1", "4000::1" are still bound, ignore ipv4 addresses
remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs {
if net.ParseIP(a).To4() != nil {
if netutils.ParseIPSloppy(a).To4() != nil {
continue
}
remainingAddrsMap[a] = true

View File

@ -25,6 +25,7 @@ import (
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -155,7 +156,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
info := &BaseServiceInfo{
clusterIP: net.ParseIP(clusterIP),
clusterIP: netutils.ParseIPSloppy(clusterIP),
port: int(port.Port),
protocol: port.Protocol,
nodePort: int(port.NodePort),

View File

@ -17,7 +17,6 @@ limitations under the License.
package proxy
import (
"net"
"reflect"
"testing"
"time"
@ -29,13 +28,14 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
netutils "k8s.io/utils/net"
)
const testHostname = "test-hostname"
func makeTestServiceInfo(clusterIP string, port int, protocol string, healthcheckNodePort int, svcInfoFuncs ...func(*BaseServiceInfo)) *BaseServiceInfo {
info := &BaseServiceInfo{
clusterIP: net.ParseIP(clusterIP),
clusterIP: netutils.ParseIPSloppy(clusterIP),
port: port,
protocol: v1.Protocol(protocol),
}

View File

@ -527,7 +527,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String {
continue
}
serviceIP := net.ParseIP(service.Spec.ClusterIP)
serviceIP := netutils.ParseIPSloppy(service.Spec.ClusterIP)
klog.V(1).InfoS("Adding new service", "serviceName", serviceName, "addr", net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), "protocol", servicePort.Protocol)
info, err = proxier.addServiceOnPortInternal(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout)
if err != nil {
@ -711,7 +711,7 @@ func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bo
if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
return false
}
if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) {
if !info.portal.ip.Equal(netutils.ParseIPSloppy(service.Spec.ClusterIP)) {
return false
}
if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) {
@ -744,14 +744,14 @@ func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceI
return err
}
for _, publicIP := range info.externalIPs {
err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)
err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
}
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)
err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
@ -923,11 +923,11 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service
// Collect errors and report them all at the end.
el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, publicIP := range info.externalIPs {
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
}
if info.nodePort != 0 {
@ -1116,11 +1116,11 @@ func iptablesFlush(ipt iptables.Interface) error {
}
// Used below.
var zeroIPv4 = net.ParseIP("0.0.0.0")
var localhostIPv4 = net.ParseIP("127.0.0.1")
var zeroIPv4 = netutils.ParseIPSloppy("0.0.0.0")
var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1")
var zeroIPv6 = net.ParseIP("::")
var localhostIPv6 = net.ParseIP("::1")
var zeroIPv6 = netutils.ParseIPSloppy("::")
var localhostIPv6 = netutils.ParseIPSloppy("::1")
// Build a slice of iptables args that are common to from-container and from-host portal rules.
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string {

View File

@ -39,6 +39,7 @@ import (
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
)
const (
@ -328,7 +329,7 @@ func TestTCPProxy(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -353,7 +354,7 @@ func TestUDPProxy(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -378,7 +379,7 @@ func TestUDPProxyTimeout(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -417,7 +418,7 @@ func TestMultiPortProxy(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -440,7 +441,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
lb := NewLoadBalancerRR()
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -473,7 +474,7 @@ func TestTCPProxyStop(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -505,7 +506,7 @@ func TestUDPProxyStop(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -537,7 +538,7 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -569,7 +570,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -602,7 +603,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -640,7 +641,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -678,7 +679,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -716,7 +717,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -754,7 +755,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -793,7 +794,7 @@ func TestProxyUpdatePortal(t *testing.T) {
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
@ -851,7 +852,7 @@ func TestOnServiceAddChangeMap(t *testing.T) {
fexec := makeFakeExec()
// Use long minSyncPeriod so we can test that immediate syncs work
p, err := createProxier(NewLoadBalancerRR(), net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket)
p, err := createProxier(NewLoadBalancerRR(), netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}

View File

@ -22,13 +22,14 @@ import (
"strconv"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well.
func IPPart(s string) string {
if ip := net.ParseIP(s); ip != nil {
if ip := netutils.ParseIPSloppy(s); ip != nil {
// IP address without port
return s
}
@ -39,7 +40,7 @@ func IPPart(s string) string {
return ""
}
// Check if host string is a valid IP address
ip := net.ParseIP(host)
ip := netutils.ParseIPSloppy(host)
if ip == nil {
klog.Errorf("invalid IP part '%s'", host)
return ""

View File

@ -17,8 +17,9 @@ limitations under the License.
package util
import (
"net"
"testing"
netutils "k8s.io/utils/net"
)
func TestIPPart(t *testing.T) {
@ -112,7 +113,7 @@ func TestToCIDR(t *testing.T) {
}
for _, tc := range testCases {
ip := net.ParseIP(tc.ip)
ip := netutils.ParseIPSloppy(tc.ip)
addr := ToCIDR(ip)
if addr != tc.expectedAddr {
t.Errorf("Unexpected host address for %s: Expected: %s, Got %s", tc.ip, tc.expectedAddr, addr)

View File

@ -18,11 +18,10 @@ package iptables
import (
"fmt"
"net"
"k8s.io/klog/v2"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
// LocalTrafficDetector in a interface to take action (jump) based on whether traffic originated locally
@ -66,10 +65,10 @@ type detectLocalByCIDR struct {
// NewDetectLocalByCIDR implements the LocalTrafficDetector interface using a CIDR. This can be used when a single CIDR
// range can be used to capture the notion of local traffic.
func NewDetectLocalByCIDR(cidr string, ipt utiliptables.Interface) (LocalTrafficDetector, error) {
if utilnet.IsIPv6CIDRString(cidr) != ipt.IsIPv6() {
if netutils.IsIPv6CIDRString(cidr) != ipt.IsIPv6() {
return nil, fmt.Errorf("CIDR %s has incorrect IP version: expect isIPv6=%t", cidr, ipt.IsIPv6())
}
_, _, err := net.ParseCIDR(cidr)
_, _, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return nil, err
}

View File

@ -32,7 +32,7 @@ import (
"k8s.io/client-go/tools/events"
helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
@ -88,7 +88,7 @@ func IsZeroCIDR(cidr string) bool {
// IsProxyableIP checks if a given IP address is permitted to be proxied
func IsProxyableIP(ip string) error {
netIP := net.ParseIP(ip)
netIP := netutils.ParseIPSloppy(ip)
if netIP == nil {
return ErrAddressNotAllowed
}
@ -146,7 +146,7 @@ func GetLocalAddrs() ([]net.IP, error) {
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
ip, _, err := netutils.ParseCIDRSloppy(addr.String())
if err != nil {
return nil, err
}
@ -159,7 +159,7 @@ func GetLocalAddrs() ([]net.IP, error) {
// GetLocalAddrSet return a local IPSet.
// If failed to get local addr, will assume no local ips.
func GetLocalAddrSet() utilnet.IPSet {
func GetLocalAddrSet() netutils.IPSet {
localAddrs, err := GetLocalAddrs()
if err != nil {
klog.ErrorS(err, "Failed to get local addresses assuming no local IPs")
@ -167,7 +167,7 @@ func GetLocalAddrSet() utilnet.IPSet {
klog.InfoS("No local addresses were found")
}
localAddrSet := utilnet.IPSet{}
localAddrSet := netutils.IPSet{}
localAddrSet.Insert(localAddrs...)
return localAddrSet
}
@ -220,7 +220,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
continue
}
_, ipNet, _ := net.ParseCIDR(cidr)
_, ipNet, _ := netutils.ParseCIDRSloppy(cidr)
for _, addr := range addrs {
var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
@ -234,10 +234,10 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
}
if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
@ -295,23 +295,23 @@ func MapCIDRsByIPFamily(cidrStrings []string) map[v1.IPFamily][]string {
}
func getIPFamilyFromIP(ipStr string) (v1.IPFamily, error) {
netIP := net.ParseIP(ipStr)
netIP := netutils.ParseIPSloppy(ipStr)
if netIP == nil {
return "", ErrAddressNotAllowed
}
if utilnet.IsIPv6(netIP) {
if netutils.IsIPv6(netIP) {
return v1.IPv6Protocol, nil
}
return v1.IPv4Protocol, nil
}
func getIPFamilyFromCIDR(cidrStr string) (v1.IPFamily, error) {
_, netCIDR, err := net.ParseCIDR(cidrStr)
_, netCIDR, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil {
return "", ErrAddressNotAllowed
}
if utilnet.IsIPv6CIDR(netCIDR) {
if netutils.IsIPv6CIDR(netCIDR) {
return v1.IPv6Protocol, nil
}
return v1.IPv4Protocol, nil
@ -335,7 +335,7 @@ func AppendPortIfNeeded(addr string, port int32) string {
}
// Simply return for invalid case. This should be caught by validation instead.
ip := net.ParseIP(addr)
ip := netutils.ParseIPSloppy(addr)
if ip == nil {
return addr
}
@ -441,7 +441,7 @@ func GetClusterIPByFamily(ipFamily v1.IPFamily, service *v1.Service) string {
}
IsIPv6Family := (ipFamily == v1.IPv6Protocol)
if IsIPv6Family == utilnet.IsIPv6String(service.Spec.ClusterIP) {
if IsIPv6Family == netutils.IsIPv6String(service.Spec.ClusterIP) {
return service.Spec.ClusterIP
}
@ -492,7 +492,7 @@ func WriteBytesLine(buf *bytes.Buffer, bytes []byte) {
// RevertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only
// closes the ports opened in this sync.
func RevertPorts(replacementPortsMap, originalPortsMap map[utilnet.LocalPort]utilnet.Closeable) {
func RevertPorts(replacementPortsMap, originalPortsMap map[netutils.LocalPort]netutils.Closeable) {
for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil {

View File

@ -30,7 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
fake "k8s.io/kubernetes/pkg/proxy/util/testing"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
func TestValidateWorks(t *testing.T) {
@ -141,7 +141,7 @@ func (r *dummyResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IP
}
resp := []net.IPAddr{}
for _, ipString := range r.ips {
resp = append(resp, net.IPAddr{IP: net.ParseIP(ipString)})
resp = append(resp, net.IPAddr{IP: netutils.ParseIPSloppy(ipString)})
}
return resp, nil
}
@ -187,13 +187,13 @@ func TestIsAllowedHost(t *testing.T) {
for i := range testCases {
var denyList []*net.IPNet
for _, cidrStr := range testCases[i].denied {
_, ipNet, err := net.ParseCIDR(cidrStr)
_, ipNet, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil {
t.Fatalf("bad IP for test case: %v: %v", cidrStr, err)
}
denyList = append(denyList, ipNet)
}
got := IsAllowedHost(net.ParseIP(testCases[i].ip), denyList)
got := IsAllowedHost(netutils.ParseIPSloppy(testCases[i].ip), denyList)
if testCases[i].want != got {
t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
}
@ -281,7 +281,7 @@ func TestShouldSkipService(t *testing.T) {
func TestNewFilteredDialContext(t *testing.T) {
_, cidr, _ := net.ParseCIDR("1.1.1.1/28")
_, cidr, _ := netutils.ParseCIDRSloppy("1.1.1.1/28")
testCases := []struct {
name string
@ -324,7 +324,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false},
dial: "127.0.0.1:8080",
expectResolve: "127.0.0.1",
resolveTo: []net.IPAddr{{IP: net.ParseIP("127.0.0.1")}},
resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("127.0.0.1")}},
expectWrappedDial: false,
expectErr: "address not allowed",
},
@ -333,7 +333,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}},
dial: "foo.com:8080",
expectResolve: "foo.com",
resolveTo: []net.IPAddr{{IP: net.ParseIP("1.1.1.1")}},
resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("1.1.1.1")}},
expectWrappedDial: false,
expectErr: "address not allowed",
},
@ -342,7 +342,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}},
dial: "foo.com:8080",
expectResolve: "foo.com",
resolveTo: []net.IPAddr{{IP: net.ParseIP("2.2.2.2")}},
resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("2.2.2.2")}},
expectWrappedDial: true,
expectErr: "",
},
@ -417,11 +417,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
},
},
expected: sets.NewString("10.20.30.51"),
@ -432,11 +432,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
},
},
expected: sets.NewString("0.0.0.0/0"),
@ -447,11 +447,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
},
},
expected: sets.NewString("2001:db8::1", "::1"),
@ -462,11 +462,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
},
},
expected: sets.NewString("::/0"),
@ -477,11 +477,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
},
},
expected: sets.NewString("127.0.0.1"),
@ -492,7 +492,7 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.1.1"), Mask: net.CIDRMask(8, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.1.1"), Mask: net.CIDRMask(8, 32)}},
},
},
expected: sets.NewString("127.0.1.1"),
@ -503,11 +503,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
},
},
expected: sets.NewString("10.20.30.51", "100.200.201.1"),
@ -518,11 +518,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
},
},
expected: nil,
@ -534,11 +534,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
},
},
expected: sets.NewString("0.0.0.0/0", "::/0"),
@ -549,11 +549,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
},
},
expected: sets.NewString("0.0.0.0/0", "::/0"),
@ -564,7 +564,7 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
},
},
expected: sets.NewString("0.0.0.0/0"),
@ -575,11 +575,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
},
},
expected: sets.NewString("0.0.0.0/0", "::1"),
@ -590,11 +590,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}},
addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
},
},
expected: sets.NewString("::/0", "1.2.3.4"),
@ -1063,22 +1063,22 @@ func (c *fakeClosable) Close() error {
func TestRevertPorts(t *testing.T) {
testCases := []struct {
replacementPorts []utilnet.LocalPort
existingPorts []utilnet.LocalPort
replacementPorts []netutils.LocalPort
existingPorts []netutils.LocalPort
expectToBeClose []bool
}{
{
replacementPorts: []utilnet.LocalPort{
replacementPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []utilnet.LocalPort{},
existingPorts: []netutils.LocalPort{},
expectToBeClose: []bool{true, true, true},
},
{
replacementPorts: []utilnet.LocalPort{},
existingPorts: []utilnet.LocalPort{
replacementPorts: []netutils.LocalPort{},
existingPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
@ -1086,12 +1086,12 @@ func TestRevertPorts(t *testing.T) {
expectToBeClose: []bool{},
},
{
replacementPorts: []utilnet.LocalPort{
replacementPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []utilnet.LocalPort{
existingPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
@ -1099,24 +1099,24 @@ func TestRevertPorts(t *testing.T) {
expectToBeClose: []bool{false, false, false},
},
{
replacementPorts: []utilnet.LocalPort{
replacementPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []utilnet.LocalPort{
existingPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5003},
},
expectToBeClose: []bool{false, true, false},
},
{
replacementPorts: []utilnet.LocalPort{
replacementPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []utilnet.LocalPort{
existingPorts: []netutils.LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
@ -1127,11 +1127,11 @@ func TestRevertPorts(t *testing.T) {
}
for i, tc := range testCases {
replacementPortsMap := make(map[utilnet.LocalPort]utilnet.Closeable)
replacementPortsMap := make(map[netutils.LocalPort]netutils.Closeable)
for _, lp := range tc.replacementPorts {
replacementPortsMap[lp] = &fakeClosable{}
}
existingPortsMap := make(map[utilnet.LocalPort]utilnet.Closeable)
existingPortsMap := make(map[netutils.LocalPort]netutils.Closeable)
for _, lp := range tc.existingPorts {
existingPortsMap[lp] = &fakeClosable{}
}

View File

@ -21,10 +21,11 @@ package winkernel
import (
"encoding/json"
"fmt"
"strings"
"github.com/Microsoft/hcsshim"
"k8s.io/klog/v2"
"net"
"strings"
netutils "k8s.io/utils/net"
)
type HostNetworkService interface {
@ -113,7 +114,7 @@ func (hns hnsV1) createEndpoint(ep *endpointsInfo, networkName string) (*endpoin
}
hnsEndpoint := &hcsshim.HNSEndpoint{
MacAddress: ep.macAddress,
IPAddress: net.ParseIP(ep.ip),
IPAddress: netutils.ParseIPSloppy(ep.ip),
}
var createdEndpoint *hcsshim.HNSEndpoint

View File

@ -52,7 +52,7 @@ import (
"k8s.io/kubernetes/pkg/proxy/metaproxier"
"k8s.io/kubernetes/pkg/proxy/metrics"
"k8s.io/kubernetes/pkg/util/async"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
// KernelCompatTester tests whether the required kernel capabilities are
@ -424,7 +424,7 @@ func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.
ip: baseInfo.IP(),
port: uint16(portNumber),
isLocal: baseInfo.GetIsLocal(),
macAddress: conjureMac("02-11", net.ParseIP(baseInfo.IP())),
macAddress: conjureMac("02-11", netutils.ParseIPSloppy(baseInfo.IP())),
refCount: new(uint16),
hnsID: "",
hns: proxier.hns,
@ -510,7 +510,7 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service
}
for _, ingress := range service.Status.LoadBalancer.Ingress {
if net.ParseIP(ingress.IP) != nil {
if netutils.ParseIPSloppy(ingress.IP) != nil {
info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP})
}
}
@ -520,11 +520,11 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service
func (network hnsNetworkInfo) findRemoteSubnetProviderAddress(ip string) string {
var providerAddress string
for _, rs := range network.remoteSubnets {
_, ipNet, err := net.ParseCIDR(rs.destinationPrefix)
_, ipNet, err := netutils.ParseCIDRSloppy(rs.destinationPrefix)
if err != nil {
klog.ErrorS(err, "Failed to parse CIDR")
}
if ipNet.Contains(net.ParseIP(ip)) {
if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
providerAddress = rs.providerAddress
}
if ip == rs.providerAddress {
@ -634,7 +634,7 @@ func NewProxier(
if nodeIP == nil {
klog.InfoS("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
nodeIP = net.ParseIP("127.0.0.1")
nodeIP = netutils.ParseIPSloppy("127.0.0.1")
}
if len(clusterCIDR) == 0 {
@ -705,7 +705,7 @@ func NewProxier(
for _, inter := range interfaces {
addresses, _ := inter.Addrs()
for _, addr := range addresses {
addrIP, _, _ := net.ParseCIDR(addr.String())
addrIP, _, _ := netutils.ParseCIDRSloppy(addr.String())
if addrIP.String() == nodeIP.String() {
klog.V(2).InfoS("record Host MAC address", "addr", inter.HardwareAddr.String())
hostMac = inter.HardwareAddr.String()
@ -717,7 +717,7 @@ func NewProxier(
}
}
isIPv6 := utilnet.IsIPv6(nodeIP)
isIPv6 := netutils.IsIPv6(nodeIP)
proxier := &Proxier{
endPointsRefCount: make(endPointsReferenceCountMap),
serviceMap: make(proxy.ServiceMap),
@ -1179,7 +1179,7 @@ func (proxier *Proxier) syncProxyRules() {
hnsEndpoint := &endpointsInfo{
ip: ep.ip,
isLocal: false,
macAddress: conjureMac("02-11", net.ParseIP(ep.ip)),
macAddress: conjureMac("02-11", netutils.ParseIPSloppy(ep.ip)),
providerAddress: providerAddress,
}

View File

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
@ -73,9 +74,9 @@ func (hns fakeHNS) getEndpointByID(id string) (*endpointsInfo, error) {
}
func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
_, ipNet, _ := net.ParseCIDR(destinationPrefix)
_, ipNet, _ := netutils.ParseCIDRSloppy(destinationPrefix)
if ipNet.Contains(net.ParseIP(ip)) {
if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
return &endpointsInfo{
ip: ip,
isLocal: false,
@ -144,7 +145,7 @@ func NewFakeProxier(syncPeriod time.Duration, minSyncPeriod time.Duration, clust
func TestCreateServiceVip(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY)
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil {
t.Error()
}
@ -199,7 +200,7 @@ func TestCreateServiceVip(t *testing.T) {
func TestCreateRemoteEndpointOverlay(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY)
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil {
t.Error()
}
@ -264,7 +265,7 @@ func TestCreateRemoteEndpointOverlay(t *testing.T) {
func TestCreateRemoteEndpointL2Bridge(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge")
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil {
t.Error()
}
@ -328,7 +329,7 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) {
func TestSharedRemoteEndpointDelete(t *testing.T) {
syncPeriod := 30 * time.Second
tcpProtocol := v1.ProtocolTCP
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge")
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil {
t.Error()
}
@ -470,7 +471,7 @@ func TestSharedRemoteEndpointDelete(t *testing.T) {
}
func TestSharedRemoteEndpointUpdate(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge")
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil {
t.Error()
}
@ -645,7 +646,7 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) {
func TestCreateLoadBalancer(t *testing.T) {
syncPeriod := 30 * time.Second
tcpProtocol := v1.ProtocolTCP
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY)
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil {
t.Error()
}
@ -703,7 +704,7 @@ func TestCreateLoadBalancer(t *testing.T) {
func TestCreateDsrLoadBalancer(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY)
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil {
t.Error()
}
@ -765,7 +766,7 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
func TestEndpointSlice(t *testing.T) {
syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY)
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil {
t.Error()
}

View File

@ -26,6 +26,7 @@ import (
"time"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -106,8 +107,8 @@ var (
)
// Used below.
var localhostIPv4 = net.ParseIP("127.0.0.1")
var localhostIPv6 = net.ParseIP("::1")
var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1")
var localhostIPv6 = netutils.ParseIPSloppy("::1")
// NewProxier returns a new Proxier given a LoadBalancer and an address on
// which to listen. It is assumed that there is only a single Proxier active
@ -208,7 +209,7 @@ func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serv
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
var serviceIP net.IP
if listenIP != allAvailableInterfaces {
if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
if serviceIP = netutils.ParseIPSloppy(listenIP); serviceIP == nil {
return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
}
// add the IP address. Node port binds to all interfaces.
@ -259,7 +260,7 @@ func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePort
// close the PortalProxy by deleting the service IP address
if info.portal.ip != allAvailableInterfaces {
serviceIP := net.ParseIP(info.portal.ip)
serviceIP := netutils.ParseIPSloppy(info.portal.ip)
args := proxier.netshIPv4AddressDeleteArgs(serviceIP)
if err := proxier.netsh.DeleteIPAddress(args); err != nil {
return err

View File

@ -36,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/proxy"
netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
netutils "k8s.io/utils/net"
)
const (
@ -251,7 +252,7 @@ func TestTCPProxy(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -278,7 +279,7 @@ func TestUDPProxy(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -305,7 +306,7 @@ func TestUDPProxyTimeout(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -344,7 +345,7 @@ func TestMultiPortProxy(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -374,7 +375,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -440,7 +441,7 @@ func TestTCPProxyStop(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -484,7 +485,7 @@ func TestUDPProxyStop(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -522,7 +523,7 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -567,7 +568,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -612,7 +613,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -674,7 +675,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -735,7 +736,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -783,7 +784,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -828,7 +829,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
@ -881,7 +882,7 @@ func TestProxyUpdatePortal(t *testing.T) {
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}

View File

@ -24,7 +24,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/allocator"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
// Interface manages the allocation of IP addresses out of a range. Interface
@ -85,12 +85,12 @@ type Range struct {
func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) (*Range, error) {
registerMetrics()
max := utilnet.RangeSize(cidr)
base := utilnet.BigForIP(cidr.IP)
max := netutils.RangeSize(cidr)
base := netutils.BigForIP(cidr.IP)
rangeSpec := cidr.String()
var family api.IPFamily
if utilnet.IsIPv6CIDR(cidr) {
if netutils.IsIPv6CIDR(cidr) {
family = api.IPv6Protocol
// Limit the max size, since the allocator keeps a bitmap of that size.
if max > 65536 {
@ -126,7 +126,7 @@ func NewInMemory(cidr *net.IPNet) (*Range, error) {
// NewFromSnapshot allocates a Range and initializes it from a snapshot.
func NewFromSnapshot(snap *api.RangeAllocation) (*Range, error) {
_, ipnet, err := net.ParseCIDR(snap.Range)
_, ipnet, err := netutils.ParseCIDRSloppy(snap.Range)
if err != nil {
return nil, err
}
@ -219,7 +219,7 @@ func (r *Range) AllocateNext() (net.IP, error) {
clusterIPAllocated.WithLabelValues(label.String()).Set(float64(r.Used()))
clusterIPAvailable.WithLabelValues(label.String()).Set(float64(r.Free()))
return utilnet.AddIPOffset(r.base, offset), nil
return netutils.AddIPOffset(r.base, offset), nil
}
// Release releases the IP back to the pool. Releasing an
@ -244,7 +244,7 @@ func (r *Range) Release(ip net.IP) error {
// ForEach calls the provided function for each allocated IP.
func (r *Range) ForEach(fn func(net.IP)) {
r.alloc.ForEach(func(offset int) {
ip, _ := utilnet.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
ip, _ := netutils.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
fn(ip)
})
}
@ -310,5 +310,5 @@ func (r *Range) contains(ip net.IP) (bool, int) {
// calculateIPOffset calculates the integer offset of ip from base such that
// base + offset = ip. It requires ip >= base.
func calculateIPOffset(base *big.Int, ip net.IP) int {
return int(big.NewInt(0).Sub(utilnet.BigForIP(ip), base).Int64())
return int(big.NewInt(0).Sub(netutils.BigForIP(ip), base).Int64())
}

View File

@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics/testutil"
api "k8s.io/kubernetes/pkg/apis/core"
netutils "k8s.io/utils/net"
)
func TestAllocate(t *testing.T) {
@ -65,7 +66,7 @@ func TestAllocate(t *testing.T) {
},
}
for _, tc := range testCases {
_, cidr, err := net.ParseCIDR(tc.cidr)
_, cidr, err := netutils.ParseCIDRSloppy(tc.cidr)
if err != nil {
t.Fatal(err)
}
@ -110,7 +111,7 @@ func TestAllocate(t *testing.T) {
t.Fatal(err)
}
released := net.ParseIP(tc.released)
released := netutils.ParseIPSloppy(tc.released)
if err := r.Release(released); err != nil {
t.Fatal(err)
}
@ -132,12 +133,12 @@ func TestAllocate(t *testing.T) {
t.Fatal(err)
}
for _, outOfRange := range tc.outOfRange {
err = r.Allocate(net.ParseIP(outOfRange))
err = r.Allocate(netutils.ParseIPSloppy(outOfRange))
if _, ok := err.(*ErrNotInRange); !ok {
t.Fatal(err)
}
}
if err := r.Allocate(net.ParseIP(tc.alreadyAllocated)); err != ErrAllocated {
if err := r.Allocate(netutils.ParseIPSloppy(tc.alreadyAllocated)); err != ErrAllocated {
t.Fatal(err)
}
if f := r.Free(); f != 1 {
@ -159,7 +160,7 @@ func TestAllocate(t *testing.T) {
}
func TestAllocateTiny(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/32")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/32")
if err != nil {
t.Fatal(err)
}
@ -176,7 +177,7 @@ func TestAllocateTiny(t *testing.T) {
}
func TestAllocateSmall(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.240/30")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.240/30")
if err != nil {
t.Fatal(err)
}
@ -199,10 +200,10 @@ func TestAllocateSmall(t *testing.T) {
found.Insert(ip.String())
}
for s := range found {
if !r.Has(net.ParseIP(s)) {
if !r.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s)
}
if err := r.Allocate(net.ParseIP(s)); err != ErrAllocated {
if err := r.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
t.Fatal(err)
}
}
@ -220,7 +221,7 @@ func TestAllocateSmall(t *testing.T) {
}
func TestForEach(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
@ -238,7 +239,7 @@ func TestForEach(t *testing.T) {
t.Fatal(err)
}
for ips := range tc {
ip := net.ParseIP(ips)
ip := netutils.ParseIPSloppy(ips)
if err := r.Allocate(ip); err != nil {
t.Errorf("[%d] error allocating IP %v: %v", i, ip, err)
}
@ -260,7 +261,7 @@ func TestForEach(t *testing.T) {
}
func TestSnapshot(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
@ -283,7 +284,7 @@ func TestSnapshot(t *testing.T) {
t.Fatal(err)
}
_, network, err := net.ParseCIDR(dst.Range)
_, network, err := netutils.ParseCIDRSloppy(dst.Range)
if err != nil {
t.Fatal(err)
}
@ -292,7 +293,7 @@ func TestSnapshot(t *testing.T) {
t.Fatalf("mismatched networks: %s : %s", network, cidr)
}
_, otherCidr, err := net.ParseCIDR("192.168.2.0/24")
_, otherCidr, err := netutils.ParseCIDRSloppy("192.168.2.0/24")
if err != nil {
t.Fatal(err)
}
@ -322,7 +323,7 @@ func TestSnapshot(t *testing.T) {
}
func TestNewFromSnapshot(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.0.0/24")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.0.0/24")
if err != nil {
t.Fatal(err)
}
@ -366,7 +367,7 @@ func TestNewFromSnapshot(t *testing.T) {
func TestClusterIPMetrics(t *testing.T) {
// create IPv4 allocator
cidrIPv4 := "10.0.0.0/24"
_, clusterCIDRv4, _ := net.ParseCIDR(cidrIPv4)
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
a, err := NewInMemory(clusterCIDRv4)
if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -374,7 +375,7 @@ func TestClusterIPMetrics(t *testing.T) {
clearMetrics(map[string]string{"cidr": cidrIPv4})
// create IPv6 allocator
cidrIPv6 := "2001:db8::/112"
_, clusterCIDRv6, _ := net.ParseCIDR(cidrIPv6)
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
b, err := NewInMemory(clusterCIDRv6)
if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -420,10 +421,10 @@ func TestClusterIPMetrics(t *testing.T) {
// try to allocate the same IP addresses
for s := range found {
if !a.Has(net.ParseIP(s)) {
if !a.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s)
}
if err := a.Allocate(net.ParseIP(s)); err != ErrAllocated {
if err := a.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
t.Fatal(err)
}
}
@ -437,10 +438,10 @@ func TestClusterIPMetrics(t *testing.T) {
// release the addresses allocated
for s := range found {
if !a.Has(net.ParseIP(s)) {
if !a.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s)
}
if err := a.Release(net.ParseIP(s)); err != nil {
if err := a.Release(netutils.ParseIPSloppy(s)); err != nil {
t.Fatal(err)
}
}

View File

@ -35,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
netutil "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
// Repair is a controller loop that periodically examines all service ClusterIP allocations
@ -82,7 +82,7 @@ func NewRepair(interval time.Duration, serviceClient corev1client.ServicesGetter
primary := v1.IPv4Protocol
secondary := v1.IPv6Protocol
if netutil.IsIPv6(network.IP) {
if netutils.IsIPv6(network.IP) {
primary = v1.IPv6Protocol
}
@ -196,7 +196,7 @@ func (c *Repair) runOnce() error {
}
getFamilyByIP := func(ip net.IP) v1.IPFamily {
if netutil.IsIPv6(ip) {
if netutils.IsIPv6(ip) {
return v1.IPv6Protocol
}
return v1.IPv4Protocol
@ -210,7 +210,7 @@ func (c *Repair) runOnce() error {
}
for _, ip := range svc.Spec.ClusterIPs {
ip := net.ParseIP(ip)
ip := netutils.ParseIPSloppy(ip)
if ip == nil {
// cluster IP is corrupt
c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPNotValid", "Cluster IP %s is not a valid IP; please recreate service", ip)

View File

@ -28,6 +28,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@ -60,7 +61,7 @@ func TestRepair(t *testing.T) {
ipregistry := &mockRangeRegistry{
item: &api.RangeAllocation{Range: "192.168.1.0/24"},
}
_, cidr, _ := net.ParseCIDR(ipregistry.item.Range)
_, cidr, _ := netutils.ParseCIDRSloppy(ipregistry.item.Range)
r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, nil, nil)
if err := r.RunOnce(); err != nil {
@ -81,12 +82,12 @@ func TestRepair(t *testing.T) {
}
func TestRepairLeak(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr)
if err != nil {
t.Fatal(err)
}
previous.Allocate(net.ParseIP("192.168.1.10"))
previous.Allocate(netutils.ParseIPSloppy("192.168.1.10"))
var dst api.RangeAllocation
err = previous.Snapshot(&dst)
@ -115,7 +116,7 @@ func TestRepairLeak(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !after.Has(net.ParseIP("192.168.1.10")) {
if !after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to still have leaked IP")
}
}
@ -127,13 +128,13 @@ func TestRepairLeak(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if after.Has(net.ParseIP("192.168.1.10")) {
if after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to not have leaked IP")
}
}
func TestRepairWithExisting(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr)
if err != nil {
t.Fatal(err)
@ -211,7 +212,7 @@ func TestRepairWithExisting(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !after.Has(net.ParseIP("192.168.1.1")) || !after.Has(net.ParseIP("192.168.1.100")) {
if !after.Has(netutils.ParseIPSloppy("192.168.1.1")) || !after.Has(netutils.ParseIPSloppy("192.168.1.100")) {
t.Errorf("unexpected ipallocator state: %#v", after)
}
if free := after.Free(); free != 252 {
@ -220,7 +221,7 @@ func TestRepairWithExisting(t *testing.T) {
}
func makeRangeRegistry(t *testing.T, cidrRange string) *mockRangeRegistry {
_, cidr, _ := net.ParseCIDR(cidrRange)
_, cidr, _ := netutils.ParseCIDRSloppy(cidrRange)
previous, err := ipallocator.NewInMemory(cidr)
if err != nil {
t.Fatal(err)
@ -247,7 +248,7 @@ func makeFakeClientSet() *fake.Clientset {
return fake.NewSimpleClientset()
}
func makeIPNet(cidr string) *net.IPNet {
_, net, _ := net.ParseCIDR(cidr)
_, net, _ := netutils.ParseCIDRSloppy(cidr)
return net
}
func TestShouldWorkOnSecondary(t *testing.T) {
@ -337,8 +338,8 @@ func TestRepairDualStack(t *testing.T) {
item: &api.RangeAllocation{Range: "2000::/108"},
}
_, cidr, _ := net.ParseCIDR(ipregistry.item.Range)
_, secondaryCIDR, _ := net.ParseCIDR(secondaryIPRegistry.item.Range)
_, cidr, _ := netutils.ParseCIDRSloppy(ipregistry.item.Range)
_, secondaryCIDR, _ := netutils.ParseCIDRSloppy(secondaryIPRegistry.item.Range)
r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, secondaryCIDR, secondaryIPRegistry)
if err := r.RunOnce(); err != nil {
@ -369,20 +370,20 @@ func TestRepairDualStack(t *testing.T) {
func TestRepairLeakDualStack(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr)
if err != nil {
t.Fatal(err)
}
previous.Allocate(net.ParseIP("192.168.1.10"))
previous.Allocate(netutils.ParseIPSloppy("192.168.1.10"))
_, secondaryCIDR, _ := net.ParseCIDR("2000::/108")
_, secondaryCIDR, _ := netutils.ParseCIDRSloppy("2000::/108")
secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR)
if err != nil {
t.Fatal(err)
}
secondaryPrevious.Allocate(net.ParseIP("2000::1"))
secondaryPrevious.Allocate(netutils.ParseIPSloppy("2000::1"))
var dst api.RangeAllocation
err = previous.Snapshot(&dst)
@ -427,14 +428,14 @@ func TestRepairLeakDualStack(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !after.Has(net.ParseIP("192.168.1.10")) {
if !after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to still have leaked IP")
}
secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated)
if err != nil {
t.Fatal(err)
}
if !secondaryAfter.Has(net.ParseIP("2000::1")) {
if !secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) {
t.Errorf("expected ipallocator to still have leaked IP")
}
}
@ -447,14 +448,14 @@ func TestRepairLeakDualStack(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if after.Has(net.ParseIP("192.168.1.10")) {
if after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to not have leaked IP")
}
secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated)
if err != nil {
t.Fatal(err)
}
if secondaryAfter.Has(net.ParseIP("2000::1")) {
if secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) {
t.Errorf("expected ipallocator to not have leaked IP")
}
}
@ -466,13 +467,13 @@ func TestRepairWithExistingDualStack(t *testing.T) {
// this will work every where except alloc & validation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr)
if err != nil {
t.Fatal(err)
}
_, secondaryCIDR, _ := net.ParseCIDR("2000::/108")
_, secondaryCIDR, _ := netutils.ParseCIDRSloppy("2000::/108")
secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR)
if err != nil {
t.Fatal(err)
@ -613,7 +614,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
t.Fatal(err)
}
if !after.Has(net.ParseIP("192.168.1.1")) || !after.Has(net.ParseIP("192.168.1.100")) {
if !after.Has(netutils.ParseIPSloppy("192.168.1.1")) || !after.Has(netutils.ParseIPSloppy("192.168.1.100")) {
t.Errorf("unexpected ipallocator state: %#v", after)
}
if free := after.Free(); free != 251 {
@ -624,7 +625,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !secondaryAfter.Has(net.ParseIP("2000::1")) || !secondaryAfter.Has(net.ParseIP("2000::2")) {
if !secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) || !secondaryAfter.Has(netutils.ParseIPSloppy("2000::2")) {
t.Errorf("unexpected ipallocator state: %#v", secondaryAfter)
}
if free := secondaryAfter.Free(); free != 65533 {

View File

@ -18,7 +18,6 @@ package storage
import (
"context"
"net"
"strings"
"testing"
@ -32,11 +31,12 @@ import (
allocatorstore "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
)
func newStorage(t *testing.T) (*etcd3testing.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface, factory.DestroyFunc) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "")
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
@ -66,7 +66,7 @@ func newStorage(t *testing.T) (*etcd3testing.EtcdTestServer, ipallocator.Interfa
}
func validNewRangeAllocation() *api.RangeAllocation {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
return &api.RangeAllocation{
Range: cidr.String(),
}
@ -79,7 +79,7 @@ func key() string {
func TestEmpty(t *testing.T) {
_, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc()
if err := storage.Allocate(net.ParseIP("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocations at this time") {
if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocations at this time") {
t.Fatal(err)
}
}
@ -87,7 +87,7 @@ func TestEmpty(t *testing.T) {
func TestErrors(t *testing.T) {
_, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc()
err := storage.Allocate(net.ParseIP("192.168.0.0"))
err := storage.Allocate(netutils.ParseIPSloppy("192.168.0.0"))
if _, ok := err.(*ipallocator.ErrNotInRange); !ok {
t.Fatal(err)
}
@ -100,7 +100,7 @@ func TestStore(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil {
if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); err != nil {
t.Fatal(err)
}
ok, err := backing.Allocate(1)
@ -110,7 +110,7 @@ func TestStore(t *testing.T) {
if ok {
t.Fatal("Expected allocation to fail")
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err)
}
}

View File

@ -45,7 +45,7 @@ import (
registry "k8s.io/kubernetes/pkg/registry/core/service"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
netutil "k8s.io/utils/net"
netutils "k8s.io/utils/net"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
@ -108,7 +108,7 @@ func NewREST(
// detect this cluster default Service IPFamily (ipfamily of --service-cluster-ip-range[0])
serviceIPFamily := api.IPv4Protocol
cidr := serviceIPs.CIDR()
if netutil.IsIPv6CIDR(&cidr) {
if netutils.IsIPv6CIDR(&cidr) {
serviceIPFamily = api.IPv6Protocol
}
@ -612,7 +612,7 @@ func (rs *REST) allocClusterIPs(service *api.Service, toAlloc map[api.IPFamily]s
}
allocated[family] = allocatedIP.String()
} else {
parsedIP := net.ParseIP(ip)
parsedIP := netutils.ParseIPSloppy(ip)
if err := allocator.Allocate(parsedIP); err != nil {
el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))}
return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el)
@ -638,7 +638,7 @@ func (rs *REST) releaseClusterIPs(toRelease map[api.IPFamily]string) (map[api.IP
continue
}
parsedIP := net.ParseIP(ip)
parsedIP := netutils.ParseIPSloppy(ip)
if err := allocator.Release(parsedIP); err != nil {
return released, err
}
@ -825,7 +825,7 @@ func (rs *REST) releaseServiceClusterIP(service *api.Service) (released map[api.
// we need to do that to handle cases where allocator is no longer configured on
// cluster
if netutil.IsIPv6String(service.Spec.ClusterIP) {
if netutils.IsIPv6String(service.Spec.ClusterIP) {
toRelease[api.IPv6Protocol] = service.Spec.ClusterIP
} else {
toRelease[api.IPv4Protocol] = service.Spec.ClusterIP
@ -852,7 +852,7 @@ func (rs *REST) releaseServiceClusterIPs(service *api.Service) (released map[api
toRelease := make(map[api.IPFamily]string)
for _, ip := range service.Spec.ClusterIPs {
if netutil.IsIPv6String(ip) {
if netutils.IsIPv6String(ip) {
toRelease[api.IPv6Protocol] = ip
} else {
toRelease[api.IPv4Protocol] = ip
@ -974,7 +974,7 @@ func (rs *REST) tryDefaultValidateServiceClusterIPFields(oldService, service *ap
// we have previously validated for ip correctness and if family exist it will match ip family
// so the following is safe to do
isIPv6 := netutil.IsIPv6String(ip)
isIPv6 := netutils.IsIPv6String(ip)
// Family is not specified yet.
if i >= len(service.Spec.IPFamilies) {

View File

@ -49,7 +49,7 @@ import (
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
"k8s.io/kubernetes/pkg/registry/registrytest"
netutil "k8s.io/utils/net"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
@ -245,14 +245,14 @@ func NewTestRESTWithPods(t *testing.T, endpoints []*api.Endpoints, pods []api.Po
}
func makeIPNet(t *testing.T) *net.IPNet {
_, net, err := net.ParseCIDR("1.2.3.0/24")
_, net, err := netutils.ParseCIDRSloppy("1.2.3.0/24")
if err != nil {
t.Error(err)
}
return net
}
func makeIPNet6(t *testing.T) *net.IPNet {
_, net, err := net.ParseCIDR("2000::/108")
_, net, err := netutils.ParseCIDRSloppy("2000::/108")
if err != nil {
t.Error(err)
}
@ -261,7 +261,7 @@ func makeIPNet6(t *testing.T) *net.IPNet {
func ipIsAllocated(t *testing.T, alloc ipallocator.Interface, ipstr string) bool {
t.Helper()
ip := net.ParseIP(ipstr)
ip := netutils.ParseIPSloppy(ipstr)
if ip == nil {
t.Errorf("error parsing IP %q", ipstr)
return false
@ -334,7 +334,7 @@ func TestServiceRegistryCreate(t *testing.T) {
allocator := storage.serviceIPAllocatorsByFamily[family]
c := allocator.CIDR()
cidr := &c
if !cidr.Contains(net.ParseIP(createdService.Spec.ClusterIPs[i])) {
if !cidr.Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[i])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[i])
}
}
@ -1309,7 +1309,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if createdSvc1.Name != "foo" {
t.Errorf("Expected foo, but got %v", createdSvc1.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc1.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
}
@ -1323,7 +1323,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if createdSvc2.Name != "bar" {
t.Errorf("Expected bar, but got %v", createdSvc2.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc2.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
}
@ -1362,7 +1362,7 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if createdSvc1.Name != "foo" {
t.Errorf("Expected foo, but got %v", createdSvc1.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc1.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
}
@ -1381,7 +1381,7 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if createdSvc2.Name != "bar" {
t.Errorf("Expected bar, but got %v", createdSvc2.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc2.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
}
}
@ -1400,7 +1400,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
}
if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
}
@ -1451,7 +1451,7 @@ func TestServiceRegistryIPLoadBalancer(t *testing.T) {
if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
}
if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) {
if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
}
@ -1797,7 +1797,7 @@ func TestInitClusterIP(t *testing.T) {
if !ok {
t.Fatalf("test is incorrect, allocator does not exist on rest")
}
if err := allocator.Allocate(net.ParseIP(ip)); err != nil {
if err := allocator.Allocate(netutils.ParseIPSloppy(ip)); err != nil {
t.Fatalf("test is incorrect, allocator failed to pre allocate IP with error:%v", err)
}
}
@ -1821,7 +1821,7 @@ func TestInitClusterIP(t *testing.T) {
if newSvc.Spec.ClusterIPs[0] != api.ClusterIPNone {
for _, ip := range newSvc.Spec.ClusterIPs {
family := api.IPv4Protocol
if netutil.IsIPv6String(ip) {
if netutils.IsIPv6String(ip) {
family = api.IPv6Protocol
}
allocator := storage.serviceIPAllocatorsByFamily[family]
@ -2225,7 +2225,7 @@ func TestServiceUpgrade(t *testing.T) {
// allocated IP
for family, ip := range testCase.allocateIPsBeforeUpdate {
alloc := storage.serviceIPAllocatorsByFamily[family]
if err := alloc.Allocate(net.ParseIP(ip)); err != nil {
if err := alloc.Allocate(netutils.ParseIPSloppy(ip)); err != nil {
t.Fatalf("test is incorrect, unable to preallocate ip:%v", ip)
}
}
@ -3653,7 +3653,7 @@ func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post
}
// ips must match families
for i, ip := range post.Spec.ClusterIPs {
isIPv6 := netutil.IsIPv6String(ip)
isIPv6 := netutils.IsIPv6String(ip)
if isIPv6 && post.Spec.IPFamilies[i] != api.IPv6Protocol {
t.Fatalf("ips does not match assigned families %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies)
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package storage
import (
"net"
"reflect"
"testing"
@ -31,6 +30,7 @@ import (
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@ -421,7 +421,7 @@ func TestServiceDefaultOnRead(t *testing.T) {
ResourcePrefix: "services",
}
_, cidr, err := net.ParseCIDR("10.0.0.0/24")
_, cidr, err := netutils.ParseCIDRSloppy("10.0.0.0/24")
if err != nil {
t.Fatalf("failed to parse CIDR")
}
@ -479,7 +479,7 @@ func TestServiceDefaulting(t *testing.T) {
ResourcePrefix: "services",
}
_, cidr, err := net.ParseCIDR(primaryCIDR)
_, cidr, err := netutils.ParseCIDRSloppy(primaryCIDR)
if err != nil {
t.Fatalf("failed to parse CIDR %s", primaryCIDR)
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package service
import (
"net"
"reflect"
"testing"
@ -34,11 +33,12 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
func newStrategy(cidr string, hasSecondary bool) (testStrategy Strategy, testStatusStrategy Strategy) {
_, testCIDR, err := net.ParseCIDR(cidr)
_, testCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
panic("invalid CIDR")
}

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kube-scheduler/config/v1beta1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
@ -143,7 +144,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta1.KubeSchedulerConfigurat
} else {
// Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.HealthzBindAddress); host != nil {
if host := netutils.ParseIPSloppy(*obj.HealthzBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.HealthzBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.HealthzBindAddress = &hostPort
} else {
@ -165,7 +166,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta1.KubeSchedulerConfigurat
} else {
// Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.MetricsBindAddress); host != nil {
if host := netutils.ParseIPSloppy(*obj.MetricsBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.MetricsBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.MetricsBindAddress = &hostPort
} else {

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kube-scheduler/config/v1beta2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
@ -141,7 +142,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta2.KubeSchedulerConfigurat
} else {
// Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.HealthzBindAddress); host != nil {
if host := netutils.ParseIPSloppy(*obj.HealthzBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.HealthzBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.HealthzBindAddress = &hostPort
}
@ -160,7 +161,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta2.KubeSchedulerConfigurat
} else {
// Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.MetricsBindAddress); host != nil {
if host := netutils.ParseIPSloppy(*obj.MetricsBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.MetricsBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.MetricsBindAddress = &hostPort
}

View File

@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
@ -103,7 +104,7 @@ func (t *tcShaper) nextClassID() (int, error) {
// Convert a CIDR from text to a hex representation
// Strips any masked parts of the IP, so 1.2.3.4/16 becomes hex(1.2.0.0)/ffffffff
func hexCIDR(cidr string) (string, error) {
ip, ipnet, err := net.ParseCIDR(cidr)
ip, ipnet, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return "", err
}

View File

@ -30,7 +30,7 @@ import (
utilnet "k8s.io/apimachinery/pkg/util/net"
corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
utilsnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"
)
// TODO(mikedanese): remove these flag wrapper types when we remove command line flags
@ -53,7 +53,7 @@ func (v IPVar) Set(s string) error {
v.Val = nil
return nil
}
if net.ParseIP(s) == nil {
if netutils.ParseIPSloppy(s) == nil {
return fmt.Errorf("%q is not a valid IP address", s)
}
if v.Val == nil {
@ -96,7 +96,7 @@ func (v IPPortVar) Set(s string) error {
// Both IP and IP:port are valid.
// Attempt to parse into IP first.
if net.ParseIP(s) != nil {
if netutils.ParseIPSloppy(s) != nil {
*v.Val = s
return nil
}
@ -106,10 +106,10 @@ func (v IPPortVar) Set(s string) error {
if err != nil {
return fmt.Errorf("%q is not in a valid format (ip or ip:port): %v", s, err)
}
if net.ParseIP(host) == nil {
if netutils.ParseIPSloppy(host) == nil {
return fmt.Errorf("%q is not a valid IP address", host)
}
if _, err := utilsnet.ParsePort(port, true); err != nil {
if _, err := netutils.ParsePort(port, true); err != nil {
return fmt.Errorf("%q is not a valid number", port)
}
*v.Val = s

View File

@ -19,13 +19,13 @@ package ipset
import (
"bytes"
"fmt"
"net"
"regexp"
"strconv"
"strings"
"k8s.io/klog/v2"
utilexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
)
// Interface is an injectable interface for running ipset commands. Implementations must be goroutine-safe.
@ -183,7 +183,7 @@ func (e *Entry) Validate(set *IPSet) bool {
}
// IP2 can not be empty for `hash:ip,port,ip` type ip set
if net.ParseIP(e.IP2) == nil {
if netutils.ParseIPSloppy(e.IP2) == nil {
klog.Errorf("Error parsing entry %v second ip address %v for ipset %v", e, e.IP2, set)
return false
}
@ -194,7 +194,7 @@ func (e *Entry) Validate(set *IPSet) bool {
}
// Net can not be empty for `hash:ip,port,net` type ip set
if _, ipNet, err := net.ParseCIDR(e.Net); ipNet == nil {
if _, ipNet, err := netutils.ParseCIDRSloppy(e.Net); ipNet == nil {
klog.Errorf("Error parsing entry %v ip net %v for ipset %v, error: %v", e, e.Net, set, err)
return false
}
@ -250,7 +250,7 @@ func (e *Entry) checkIPandProtocol(set *IPSet) bool {
return false
}
if net.ParseIP(e.IP) == nil {
if netutils.ParseIPSloppy(e.IP) == nil {
klog.Errorf("Error parsing entry %v ip address %v for ipset %v", e, e.IP, set)
return false
}

View File

@ -20,11 +20,12 @@ package ipvs
import (
"fmt"
"net"
"reflect"
"syscall"
"testing"
netutils "k8s.io/utils/net"
libipvs "github.com/moby/ipvs"
)
@ -65,7 +66,7 @@ func Test_toVirtualServer(t *testing.T) {
PEName: "",
},
VirtualServer{
Address: net.ParseIP("0.0.0.0"),
Address: netutils.ParseIPSloppy("0.0.0.0"),
Protocol: "TCP",
Port: 80,
Scheduler: "",
@ -85,11 +86,11 @@ func Test_toVirtualServer(t *testing.T) {
Timeout: 100,
Netmask: 128,
AddressFamily: syscall.AF_INET6,
Address: net.ParseIP("2012::beef"),
Address: netutils.ParseIPSloppy("2012::beef"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("2012::beef"),
Address: netutils.ParseIPSloppy("2012::beef"),
Protocol: "UDP",
Port: 33434,
Scheduler: "wlc",
@ -109,11 +110,11 @@ func Test_toVirtualServer(t *testing.T) {
Timeout: 0,
Netmask: 0xffffffff,
AddressFamily: syscall.AF_INET,
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: "",
Port: 0,
Scheduler: "lc",
@ -137,7 +138,7 @@ func Test_toVirtualServer(t *testing.T) {
PEName: "",
},
VirtualServer{
Address: net.ParseIP("::0"),
Address: netutils.ParseIPSloppy("::0"),
Protocol: "",
Port: 0,
Scheduler: "wrr",
@ -161,7 +162,7 @@ func Test_toVirtualServer(t *testing.T) {
PEName: "",
},
VirtualServer{
Address: net.ParseIP("0.0.0.0"),
Address: netutils.ParseIPSloppy("0.0.0.0"),
Protocol: "SCTP",
Port: 80,
Scheduler: "",
@ -204,11 +205,11 @@ func Test_toIPVSService(t *testing.T) {
Timeout: 0,
Netmask: 0xffffffff,
AddressFamily: syscall.AF_INET,
Address: net.ParseIP("0.0.0.0"),
Address: netutils.ParseIPSloppy("0.0.0.0"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("0.0.0.0"),
Address: netutils.ParseIPSloppy("0.0.0.0"),
Protocol: "TCP",
Port: 80,
Scheduler: "",
@ -226,11 +227,11 @@ func Test_toIPVSService(t *testing.T) {
Timeout: 100,
Netmask: 128,
AddressFamily: syscall.AF_INET6,
Address: net.ParseIP("2012::beef"),
Address: netutils.ParseIPSloppy("2012::beef"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("2012::beef"),
Address: netutils.ParseIPSloppy("2012::beef"),
Protocol: "UDP",
Port: 33434,
Scheduler: "wlc",
@ -248,11 +249,11 @@ func Test_toIPVSService(t *testing.T) {
Timeout: 0,
Netmask: 0xffffffff,
AddressFamily: syscall.AF_INET,
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: "",
Port: 0,
Scheduler: "lc",
@ -270,11 +271,11 @@ func Test_toIPVSService(t *testing.T) {
Timeout: 0,
Netmask: 128,
AddressFamily: syscall.AF_INET6,
Address: net.ParseIP("::0"),
Address: netutils.ParseIPSloppy("::0"),
PEName: "",
},
VirtualServer{
Address: net.ParseIP("::0"),
Address: netutils.ParseIPSloppy("::0"),
Protocol: "",
Port: 0,
Scheduler: "wrr",
@ -305,10 +306,10 @@ func Test_toRealServer(t *testing.T) {
Port: 54321,
ConnectionFlags: 0,
Weight: 1,
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
},
RealServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Port: 54321,
Weight: 1,
},
@ -318,10 +319,10 @@ func Test_toRealServer(t *testing.T) {
Port: 53,
ConnectionFlags: 0,
Weight: 1,
Address: net.ParseIP("2002::cafe"),
Address: netutils.ParseIPSloppy("2002::cafe"),
},
RealServer{
Address: net.ParseIP("2002::cafe"),
Address: netutils.ParseIPSloppy("2002::cafe"),
Port: 53,
Weight: 1,
},
@ -345,7 +346,7 @@ func Test_toIPVSDestination(t *testing.T) {
}{
{
RealServer{
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
Port: 54321,
Weight: 1,
},
@ -353,12 +354,12 @@ func Test_toIPVSDestination(t *testing.T) {
Port: 54321,
ConnectionFlags: 0,
Weight: 1,
Address: net.ParseIP("1.2.3.4"),
Address: netutils.ParseIPSloppy("1.2.3.4"),
},
},
{
RealServer{
Address: net.ParseIP("2002::cafe"),
Address: netutils.ParseIPSloppy("2002::cafe"),
Port: 53,
Weight: 1,
},
@ -366,7 +367,7 @@ func Test_toIPVSDestination(t *testing.T) {
Port: 53,
ConnectionFlags: 0,
Weight: 1,
Address: net.ParseIP("2002::cafe"),
Address: netutils.ParseIPSloppy("2002::cafe"),
},
},
}

Some files were not shown because too many files have changed in this diff Show More