Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)

Merge pull request #95239 from danwinship/dual-stack-node-ips-basic

DualStack: basic dual-stack node IP handling

This commit is contained in commit 05a46dbb60.
@@ -124,6 +124,7 @@ go_library(
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/utils/inotify:go_default_library",
@@ -325,7 +325,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
fs.StringVar(&f.HostnameOverride, "hostname-override", f.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname. If --cloud-provider is set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used).")
fs.StringVar(&f.NodeIP, "node-ip", f.NodeIP, "IP address of the node. If set, kubelet will use this IP address for the node. If unset, kubelet will use the node's default IPv4 address, if any, or its default IPv6 address if it has no IPv4 addresses. You can pass '::' to make it prefer the default IPv6 address rather than the default IPv4 address.")
fs.StringVar(&f.NodeIP, "node-ip", f.NodeIP, "IP address (or comma-separated dual-stack IP addresses) of the node. If unset, kubelet will use the node's default IPv4 address, if any, or its default IPv6 address if it has no IPv4 addresses. You can pass '::' to make it prefer the default IPv6 address rather than the default IPv4 address.")
fs.StringVar(&f.CertDirectory, "cert-dir", f.CertDirectory, "The directory where the TLS certs are located. "+
"If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.")
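Note: to make the new flag semantics concrete, a few illustrative --node-ip values and how the validation added later in this change treats them (the addresses are made up, and the "accepted" cases assume the IPv6DualStack feature gate is enabled and no cloud provider is configured):

    // "10.0.0.1"                   single node IP (accepted)
    // "10.0.0.1,fd01::1234"        dual-stack pair, one address per family (accepted)
    // "10.0.0.1,10.0.0.2"          two addresses of the same family (rejected)
    // "10.0.0.1,fd01::1,fd01::2"   more than two addresses (rejected)
    // "0.0.0.0,fd01::1234"         unspecified address in a dual-stack pair (rejected)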
@@ -102,6 +102,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
)

const (
@@ -1086,6 +1087,27 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
// Setup event recorder if required.
makeEventRecorder(kubeDeps, nodeName)

var nodeIPs []net.IP
if kubeServer.NodeIP != "" {
for _, ip := range strings.Split(kubeServer.NodeIP, ",") {
parsedNodeIP := net.ParseIP(strings.TrimSpace(ip))
if parsedNodeIP == nil {
klog.Warningf("Could not parse --node-ip value %q; ignoring", ip)
} else {
nodeIPs = append(nodeIPs, parsedNodeIP)
}
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(nodeIPs) > 1 {
return fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", kubeServer.NodeIP)
} else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && utilnet.IsIPv6(nodeIPs[0]) == utilnet.IsIPv6(nodeIPs[1])) {
return fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", kubeServer.NodeIP)
} else if len(nodeIPs) == 2 && kubeServer.CloudProvider != "" {
return fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", kubeServer.NodeIP)
} else if len(nodeIPs) == 2 && (nodeIPs[0].IsUnspecified() || nodeIPs[1].IsUnspecified()) {
return fmt.Errorf("dual-stack --node-ip %q cannot include '0.0.0.0' or '::'", kubeServer.NodeIP)
}

capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: true,
})
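The same parsing and validation, condensed into a standalone helper for reference (a sketch only: the helper name parseNodeIPArg is not part of this change, and it assumes the fmt, net, strings, and utilnet "k8s.io/utils/net" imports used above):

    func parseNodeIPArg(arg string, dualStackEnabled bool, cloudProvider string) ([]net.IP, error) {
        var nodeIPs []net.IP
        for _, s := range strings.Split(arg, ",") {
            // unparseable entries are skipped, as the kubelet does (it only logs a warning)
            if ip := net.ParseIP(strings.TrimSpace(s)); ip != nil {
                nodeIPs = append(nodeIPs, ip)
            }
        }
        switch {
        case !dualStackEnabled && len(nodeIPs) > 1:
            return nil, fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", arg)
        case len(nodeIPs) > 2 || (len(nodeIPs) == 2 && utilnet.IsIPv6(nodeIPs[0]) == utilnet.IsIPv6(nodeIPs[1])):
            return nil, fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", arg)
        case len(nodeIPs) == 2 && cloudProvider != "":
            return nil, fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", arg)
        case len(nodeIPs) == 2 && (nodeIPs[0].IsUnspecified() || nodeIPs[1].IsUnspecified()):
            return nil, fmt.Errorf("dual-stack --node-ip %q cannot include '0.0.0.0' or '::'", arg)
        }
        return nodeIPs, nil
    }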
@@ -1104,7 +1126,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
hostname,
hostnameOverridden,
nodeName,
kubeServer.NodeIP,
nodeIPs,
kubeServer.ProviderID,
kubeServer.CloudProvider,
kubeServer.CertDirectory,
@@ -1178,7 +1200,7 @@ func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
hostname string,
hostnameOverridden bool,
nodeName types.NodeName,
nodeIP string,
nodeIPs []net.IP,
providerID string,
cloudProvider string,
certDirectory string,
@@ -1209,7 +1231,7 @@ func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
hostname,
hostnameOverridden,
nodeName,
nodeIP,
nodeIPs,
providerID,
cloudProvider,
certDirectory,
@@ -334,7 +334,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
hostname string,
hostnameOverridden bool,
nodeName types.NodeName,
nodeIP string,
nodeIPs []net.IP,
providerID string,
cloudProvider string,
certDirectory string,
@@ -462,7 +462,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
}
httpClient := &http.Client{}
parsedNodeIP := net.ParseIP(nodeIP)

klet := &Kubelet{
hostname: hostname,
@@ -477,7 +476,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
registerNode: registerNode,
registerWithTaints: registerWithTaints,
registerSchedulable: registerSchedulable,
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, nodeIPs, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
serviceLister: serviceLister,
serviceHasSynced: serviceHasSynced,
nodeLister: nodeLister,
@@ -506,7 +505,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
containerManager: kubeDeps.ContainerManager,
containerRuntimeName: containerRuntime,
redirectContainerStreaming: crOptions.RedirectContainerStreaming,
nodeIP: parsedNodeIP,
nodeIPs: nodeIPs,
nodeIPValidator: validateNodeIP,
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
@@ -1042,8 +1041,8 @@ type Kubelet struct {
// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
oneTimeInitializer sync.Once

// If non-nil, use this IP address for the node
nodeIP net.IP
// If set, use this IP address or addresses for the node
nodeIPs []net.IP

// use this function to validate the kubelet nodeIP
nodeIPValidator func(net.IP) error
@@ -262,23 +262,23 @@ func (kl *Kubelet) GetPodCgroupRoot() string {
return kl.containerManager.GetPodCgroupRoot()
}

// GetHostIP returns host IP or nil in case of error.
func (kl *Kubelet) GetHostIP() (net.IP, error) {
// GetHostIPs returns host IPs or nil in case of error.
func (kl *Kubelet) GetHostIPs() ([]net.IP, error) {
node, err := kl.GetNode()
if err != nil {
return nil, fmt.Errorf("cannot get node: %v", err)
}
return utilnode.GetNodeHostIP(node)
return utilnode.GetNodeHostIPs(node)
}

// getHostIPAnyway attempts to return the host IP from kubelet's nodeInfo, or
// getHostIPsAnyWay attempts to return the host IPs from kubelet's nodeInfo, or
// the initialNode.
func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) {
func (kl *Kubelet) getHostIPsAnyWay() ([]net.IP, error) {
node, err := kl.getNodeAnyWay()
if err != nil {
return nil, err
}
return utilnode.GetNodeHostIP(node)
return utilnode.GetNodeHostIPs(node)
}

// GetExtraSupplementalGroupsForPod returns a list of the extra
@@ -35,9 +35,9 @@ func (kl *Kubelet) initNetworkUtil() {
exec := utilexec.New()

// At this point in startup we don't know the actual node IPs, so we configure dual stack iptables
// rules if the node _might_ be dual-stack, and single-stack based on requested nodeIP otherwise.
// rules if the node _might_ be dual-stack, and single-stack based on requested nodeIPs[0] otherwise.
maybeDualStack := utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack)
ipv6Primary := kl.nodeIP != nil && utilnet.IsIPv6(kl.nodeIP)
ipv6Primary := kl.nodeIPs != nil && utilnet.IsIPv6(kl.nodeIPs[0])

var iptClients []utiliptables.Interface
if maybeDualStack || !ipv6Primary {
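The hunk above is cut off before the IPv6 branch; the protocol selection it implies can be summarized as follows (a sketch, assuming the elided IPv6 branch mirrors the IPv4 one shown):

    // Configure IPv4 iptables rules unless the node is known to be single-stack IPv6,
    // and IPv6 rules unless it is known to be single-stack IPv4.
    configureIPv4 := maybeDualStack || !ipv6Primary
    configureIPv6 := maybeDualStack || ipv6Primary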
@@ -587,7 +587,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
}
var setters []func(n *v1.Node) error
setters = append(setters,
nodestatus.NodeAddress(kl.nodeIP, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent),
nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version),
@@ -811,11 +811,11 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod
case "spec.serviceAccountName":
return pod.Spec.ServiceAccountName, nil
case "status.hostIP":
hostIP, err := kl.getHostIPAnyWay()
hostIPs, err := kl.getHostIPsAnyWay()
if err != nil {
return "", err
}
return hostIP.String(), nil
return hostIPs[0].String(), nil
case "status.podIP":
return podIP, nil
case "status.podIPs":
@@ -1531,14 +1531,17 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
})

if kl.kubeClient != nil {
hostIP, err := kl.getHostIPAnyWay()
hostIPs, err := kl.getHostIPsAnyWay()
if err != nil {
klog.V(4).Infof("Cannot get host IP: %v", err)
klog.V(4).Infof("Cannot get host IPs: %v", err)
} else {
s.HostIP = hostIP.String()
s.HostIP = hostIPs[0].String()
if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" {
s.PodIP = hostIP.String()
s.PodIP = hostIPs[0].String()
s.PodIPs = []v1.PodIP{{IP: s.PodIP}}
if utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(hostIPs) == 2 {
s.PodIPs = append(s.PodIPs, v1.PodIP{IP: hostIPs[1].String()})
}
}
}
}
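For example, for a host-network pod on a node whose IPs are 10.0.0.1 and fd01::1234 (illustrative values), with the IPv6DualStack gate enabled and no pod IPs reported by the CRI, the block above yields:

    // s.HostIP = "10.0.0.1"
    // s.PodIP  = "10.0.0.1"
    // s.PodIPs = []v1.PodIP{{IP: "10.0.0.1"}, {IP: "fd01::1234"}}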
@@ -22,6 +22,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
@@ -2489,3 +2490,126 @@ func TestPodResourcesAreReclaimed(t *testing.T) {
})
}
}

func TestGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
testcases := []struct {
name string
dualStack bool
nodeAddresses []v1.NodeAddress
criPodIPs []string
podIPs []v1.PodIP
}{
{
name: "Simple",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "InternalIP is preferred over ExternalIP",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Dual-stack addresses are ignored in single-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Single-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
dualStack: true,
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Multiple single-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "10.0.0.2"},
{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
},
dualStack: true,
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Dual-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
dualStack: true,
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "CRI PodIPs override NodeAddresses",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
dualStack: true,
criPodIPs: []string{"192.168.0.1"},
podIPs: []v1.PodIP{
{IP: "192.168.0.1"},
},
},
}

for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet

defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, tc.dualStack)()

kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Addresses: tc.nodeAddresses,
},
},
}}

pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
pod.Spec.HostNetwork = true

criStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
IPs: tc.criPodIPs,
}

status := kl.generateAPIPodStatus(pod, criStatus)
if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
}
if tc.criPodIPs == nil && status.HostIP != status.PodIPs[0].IP {
t.Fatalf("Expected HostIP %q to equal PodIPs[0].IP %q", status.HostIP, status.PodIPs[0].IP)
}
})
}
}
@@ -15,6 +15,7 @@ go_library(
"//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/io:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
],
)
@@ -35,6 +35,7 @@ import (

"k8s.io/klog/v2"
utilio "k8s.io/utils/io"
utilnet "k8s.io/utils/net"
)

var (
@@ -58,7 +59,7 @@ const (
type Configurer struct {
recorder record.EventRecorder
nodeRef *v1.ObjectReference
nodeIP net.IP
nodeIPs []net.IP

// If non-nil, use this for container DNS server.
clusterDNS []net.IP
@@ -71,11 +72,11 @@ type Configurer struct {
}

// NewConfigurer returns a DNS configurer for launching pods.
func NewConfigurer(recorder record.EventRecorder, nodeRef *v1.ObjectReference, nodeIP net.IP, clusterDNS []net.IP, clusterDomain, resolverConfig string) *Configurer {
func NewConfigurer(recorder record.EventRecorder, nodeRef *v1.ObjectReference, nodeIPs []net.IP, clusterDNS []net.IP, clusterDomain, resolverConfig string) *Configurer {
return &Configurer{
recorder: recorder,
nodeRef: nodeRef,
nodeIP: nodeIP,
nodeIPs: nodeIPs,
clusterDNS: clusterDNS,
ClusterDomain: clusterDomain,
ResolverConfig: resolverConfig,
@@ -373,11 +374,15 @@ func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
// local machine". A nameserver setting of localhost is equivalent to
// this documented behavior.
if c.ResolverConfig == "" {
switch {
case c.nodeIP == nil || c.nodeIP.To4() != nil:
dnsConfig.Servers = []string{"127.0.0.1"}
case c.nodeIP.To16() != nil:
dnsConfig.Servers = []string{"::1"}
for _, nodeIP := range c.nodeIPs {
if utilnet.IsIPv6(nodeIP) {
dnsConfig.Servers = append(dnsConfig.Servers, "::1")
} else {
dnsConfig.Servers = append(dnsConfig.Servers, "127.0.0.1")
}
}
if len(dnsConfig.Servers) == 0 {
dnsConfig.Servers = append(dnsConfig.Servers, "127.0.0.1")
}
dnsConfig.Searches = []string{"."}
}
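For example (illustrative values): with node IPs 10.0.0.1 and fd01::1234 the loop above produces dnsConfig.Servers = ["127.0.0.1", "::1"]; with no node IPs at all it falls back to ["127.0.0.1"]. In both cases dnsConfig.Searches is set to ["."].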
@@ -57,7 +57,7 @@ const (
type Setter func(node *v1.Node) error

// NodeAddress returns a Setter that updates address-related information on the node.
func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP
func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
validateNodeIPFunc func(net.IP) error, // typically Kubelet.nodeIPValidator
hostname string, // typically Kubelet.hostname
hostnameOverridden bool, // was the hostname force set?
@@ -65,10 +65,19 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP
cloud cloudprovider.Interface, // typically Kubelet.cloud
nodeAddressesFunc func() ([]v1.NodeAddress, error), // typically Kubelet.cloudResourceSyncManager.NodeAddresses
) Setter {
var nodeIP, secondaryNodeIP net.IP
if len(nodeIPs) > 0 {
nodeIP = nodeIPs[0]
}
preferIPv4 := nodeIP == nil || nodeIP.To4() != nil
isPreferredIPFamily := func(ip net.IP) bool { return (ip.To4() != nil) == preferIPv4 }
nodeIPSpecified := nodeIP != nil && !nodeIP.IsUnspecified()

if len(nodeIPs) > 1 {
secondaryNodeIP = nodeIPs[1]
}
secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified()

return func(node *v1.Node) error {
if nodeIPSpecified {
if err := validateNodeIPFunc(nodeIP); err != nil {
@@ -76,6 +85,12 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP
}
klog.V(2).Infof("Using node IP: %q", nodeIP.String())
}
if secondaryNodeIPSpecified {
if err := validateNodeIPFunc(secondaryNodeIP); err != nil {
return fmt.Errorf("failed to validate secondaryNodeIP: %v", err)
}
klog.V(2).Infof("Using secondary node IP: %q", secondaryNodeIP.String())
}

if externalCloudProvider {
if nodeIPSpecified {
@@ -185,6 +200,12 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP
}
}
node.Status.Addresses = nodeAddresses
} else if nodeIPSpecified && secondaryNodeIPSpecified {
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: nodeIP.String()},
{Type: v1.NodeInternalIP, Address: secondaryNodeIP.String()},
{Type: v1.NodeHostName, Address: hostname},
}
} else {
var ipAddr net.IP
var err error
@@ -50,7 +50,7 @@ import (
)

const (
testKubeletHostname = "127.0.0.1"
testKubeletHostname = "hostname"
)

// TODO(mtaufen): below is ported from the old kubelet_node_status_test.go code, potentially add more test coverage for NodeAddress setter in future
@@ -86,8 +86,8 @@ func TestNodeAddress(t *testing.T) {
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
shouldError: false,
@@ -416,7 +416,7 @@ func TestNodeAddress(t *testing.T) {
}

// construct setter
setter := NodeAddress(nodeIP,
setter := NodeAddress([]net.IP{nodeIP},
nodeIPValidator,
hostname,
testCase.hostnameOverride,
@@ -433,9 +433,69 @@ func TestNodeAddress(t *testing.T) {
return
}

// Sort both sets for consistent equality
sortNodeAddresses(testCase.expectedAddresses)
sortNodeAddresses(existingNode.Status.Addresses)
assert.True(t, apiequality.Semantic.DeepEqual(testCase.expectedAddresses, existingNode.Status.Addresses),
"Diff: %s", diff.ObjectDiff(testCase.expectedAddresses, existingNode.Status.Addresses))
})
}
}

// We can't test failure or autodetection cases here because the relevant code isn't mockable
func TestNodeAddress_NoCloudProvider(t *testing.T) {
cases := []struct {
name string
nodeIPs []net.IP
expectedAddresses []v1.NodeAddress
}{
{
name: "Single --node-ip",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1")},
expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
name: "Dual --node-ips",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1"), net.ParseIP("fd01::1234")},
expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
}
for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
// testCase setup
existingNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Addresses: []v1.NodeAddress{},
},
}

nodeIPValidator := func(nodeIP net.IP) error {
return nil
}
nodeAddressesFunc := func() ([]v1.NodeAddress, error) {
return nil, fmt.Errorf("not reached")
}

// construct setter
setter := NodeAddress(testCase.nodeIPs,
nodeIPValidator,
testKubeletHostname,
false, // hostnameOverridden
false, // externalCloudProvider
nil, // cloud
nodeAddressesFunc)

// call setter on existing node
err := setter(existingNode)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}

assert.True(t, apiequality.Semantic.DeepEqual(testCase.expectedAddresses, existingNode.Status.Addresses),
"Diff: %s", diff.ObjectDiff(testCase.expectedAddresses, existingNode.Status.Addresses))
@@ -1678,19 +1738,6 @@ func TestVolumeLimits(t *testing.T) {

// Test Helpers:

// sortableNodeAddress is a type for sorting []v1.NodeAddress
type sortableNodeAddress []v1.NodeAddress

func (s sortableNodeAddress) Len() int { return len(s) }
func (s sortableNodeAddress) Less(i, j int) bool {
return (string(s[i].Type) + s[i].Address) < (string(s[j].Type) + s[j].Address)
}
func (s sortableNodeAddress) Swap(i, j int) { s[j], s[i] = s[i], s[j] }

func sortNodeAddresses(addrs sortableNodeAddress) {
sort.Sort(addrs)
}

// testEvent is used to record events for tests
type testEvent struct {
eventType string
@@ -231,7 +231,11 @@ func (kvh *kubeletVolumeHost) GetHostName() string {
}

func (kvh *kubeletVolumeHost) GetHostIP() (net.IP, error) {
return kvh.kubelet.GetHostIP()
hostIPs, err := kvh.kubelet.GetHostIPs()
if err != nil {
return nil, err
}
return hostIPs[0], err
}

func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
@@ -11,15 +11,18 @@ go_library(
srcs = ["node.go"],
importpath = "k8s.io/kubernetes/pkg/util/node",
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
],
)
@@ -28,8 +31,11 @@ go_test(
srcs = ["node_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
],
)
@@ -33,8 +33,11 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/features"
utilnet "k8s.io/utils/net"
)

const (
@@ -90,27 +93,60 @@ func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddre
return "", &NoMatchError{addresses: node.Status.Addresses}
}

// GetNodeHostIP returns the provided node's IP, based on the priority:
// 1. NodeInternalIP
// 2. NodeExternalIP
func GetNodeHostIP(node *v1.Node) (net.IP, error) {
addresses := node.Status.Addresses
addressMap := make(map[v1.NodeAddressType][]v1.NodeAddress)
for i := range addresses {
addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
// GetNodeHostIPs returns the provided node's IP(s); either a single "primary IP" for the
// node in a single-stack cluster, or a dual-stack pair of IPs in a dual-stack cluster
// (for nodes that actually have dual-stack IPs). Among other things, the IPs returned
// from this function are used as the `.status.PodIPs` values for host-network pods on the
// node, and the first IP is used as the `.status.HostIP` for all pods on the node.
func GetNodeHostIPs(node *v1.Node) ([]net.IP, error) {
// Re-sort the addresses with InternalIPs first and then ExternalIPs
allIPs := make([]net.IP, 0, len(node.Status.Addresses))
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP {
ip := net.ParseIP(addr.Address)
if ip != nil {
allIPs = append(allIPs, ip)
}
}
}
if addresses, ok := addressMap[v1.NodeInternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeExternalIP {
ip := net.ParseIP(addr.Address)
if ip != nil {
allIPs = append(allIPs, ip)
}
}
}
if addresses, ok := addressMap[v1.NodeExternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
if len(allIPs) == 0 {
return nil, fmt.Errorf("host IP unknown; known addresses: %v", node.Status.Addresses)
}
return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses)

nodeIPs := []net.IP{allIPs[0]}
if utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
for _, ip := range allIPs {
if utilnet.IsIPv6(ip) != utilnet.IsIPv6(nodeIPs[0]) {
nodeIPs = append(nodeIPs, ip)
break
}
}
}

return nodeIPs, nil
}

// GetNodeIP returns the ip of node with the provided hostname
// If required, wait for the node to be defined.
func GetNodeIP(client clientset.Interface, hostname string) net.IP {
// GetNodeHostIP returns the provided node's "primary" IP; see GetNodeHostIPs for more details
func GetNodeHostIP(node *v1.Node) (net.IP, error) {
ips, err := GetNodeHostIPs(node)
if err != nil {
return nil, err
}
// GetNodeHostIPs always returns at least one IP if it didn't return an error
return ips[0], nil
}

// GetNodeIP returns an IP (as with GetNodeHostIP) for the node with the provided name.
// If required, it will wait for the node to be created.
func GetNodeIP(client clientset.Interface, name string) net.IP {
var nodeIP net.IP
backoff := wait.Backoff{
Steps: 6,
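A minimal usage sketch of the new helper (the node literal and gate state are illustrative; utilnode is the import alias this change uses for the package elsewhere):

    node := &v1.Node{Status: v1.NodeStatus{Addresses: []v1.NodeAddress{
        {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
        {Type: v1.NodeInternalIP, Address: "fd01::1234"},
    }}}
    ips, err := utilnode.GetNodeHostIPs(node)
    // with the IPv6DualStack gate enabled:  ips == [10.0.0.1 fd01::1234], err == nil
    // with the gate disabled:               ips == [10.0.0.1]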
@@ -120,7 +156,7 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP {
}

err := wait.ExponentialBackoff(backoff, func() (bool, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), hostname, metav1.GetOptions{})
node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
klog.Errorf("Failed to retrieve node info: %v", err)
return false, nil
@@ -17,10 +17,15 @@ limitations under the License.
package node

import (
"net"
"reflect"
"testing"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
)

func TestGetPreferredAddress(t *testing.T) {
@@ -89,6 +94,147 @@ func TestGetPreferredAddress(t *testing.T) {
}
}

func TestGetNodeHostIPs(t *testing.T) {
testcases := []struct {
name string
addresses []v1.NodeAddress
dualStack bool

expectIPs []net.IP
}{
{
name: "no addresses",
expectIPs: nil,
},
{
name: "no InternalIP/ExternalIP",
addresses: []v1.NodeAddress{
{Type: v1.NodeHostName, Address: "example.com"},
},
expectIPs: nil,
},
{
name: "IPv4-only, simple",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
},
expectIPs: []net.IP{net.ParseIP("1.2.3.4")},
},
{
name: "IPv4-only, external-first",
addresses: []v1.NodeAddress{
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
},
expectIPs: []net.IP{net.ParseIP("1.2.3.4")},
},
{
name: "IPv4-only, no internal",
addresses: []v1.NodeAddress{
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
},
expectIPs: []net.IP{net.ParseIP("4.3.2.1")},
},
{
name: "dual-stack node, single-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
},
expectIPs: []net.IP{net.ParseIP("1.2.3.4")},
},
{
name: "dual-stack node, dual-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
},
dualStack: true,
expectIPs: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("a:b::c:d")},
},
{
name: "dual-stack node, different order, single-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
},
expectIPs: []net.IP{net.ParseIP("1.2.3.4")},
},
{
name: "dual-stack node, different order, dual-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
},
dualStack: true,
expectIPs: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("a:b::c:d")},
},
{
name: "dual-stack node, IPv6-first, no internal IPv4, single-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
},
expectIPs: []net.IP{net.ParseIP("a:b::c:d")},
},
{
name: "dual-stack node, IPv6-first, no internal IPv4, dual-stack cluster",
addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "a:b::c:d"},
{Type: v1.NodeExternalIP, Address: "d:c::b:a"},
{Type: v1.NodeExternalIP, Address: "4.3.2.1"},
{Type: v1.NodeExternalIP, Address: "4.3.2.2"},
},
dualStack: true,
expectIPs: []net.IP{net.ParseIP("a:b::c:d"), net.ParseIP("4.3.2.1")},
},
}

for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, tc.dualStack)()
node := &v1.Node{
Status: v1.NodeStatus{Addresses: tc.addresses},
}
nodeIPs, err := GetNodeHostIPs(node)
nodeIP, err2 := GetNodeHostIP(node)

if (err == nil && err2 != nil) || (err != nil && err2 == nil) {
t.Errorf("GetNodeHostIPs() returned error=%q but GetNodeHostIP() returned error=%q", err, err2)
}
if err != nil {
if tc.expectIPs != nil {
t.Errorf("expected %v, got error (%v)", tc.expectIPs, err)
}
} else if tc.expectIPs == nil {
t.Errorf("expected error, got %v", nodeIPs)
} else if !reflect.DeepEqual(nodeIPs, tc.expectIPs) {
t.Errorf("expected %v, got %v", tc.expectIPs, nodeIPs)
} else if !nodeIP.Equal(nodeIPs[0]) {
t.Errorf("GetNodeHostIP did not return same primary (%s) as GetNodeHostIPs (%s)", nodeIP.String(), nodeIPs[0].String())
}
})
}
}

func TestGetHostname(t *testing.T) {
testCases := []struct {
hostName string