Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Merge pull request #55265 from MrHohn/kubelet-network-codes-move
Automatic merge from submit-queue (batch tested with PRs 55265, 54092, 55353, 53733, 55385). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Rearrange kubelet networking code

**What this PR does / why we need it**: The kubelet package contains multiple `*network.go` files, and some networking-related code is scattered across other places. This PR gathers the networking code into `kubelet_network.go`. There are no functional changes.

**Which issue(s) this PR fixes**: Fixes #55451

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
This commit is contained in: commit f2b45cb266
pkg/kubelet/BUILD

@@ -18,7 +18,6 @@ go_library(
        "kubelet_pods.go",
        "kubelet_resources.go",
        "kubelet_volumes.go",
        "networks.go",
        "oom_watcher.go",
        "pod_container_deletor.go",
        "pod_workers.go",
@@ -153,7 +152,6 @@ go_test(
        "kubelet_resources_test.go",
        "kubelet_test.go",
        "kubelet_volumes_test.go",
        "networks_test.go",
        "oom_watcher_test.go",
        "pod_container_deletor_test.go",
        "pod_workers_test.go",
pkg/kubelet/kubelet.go

@@ -19,13 +19,11 @@ package kubelet

import (
    "crypto/tls"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "os"
    "path"
    "path/filepath"
    goruntime "runtime"
    "sort"
    "strings"

@@ -1401,64 +1399,6 @@ func (kl *Kubelet) GetKubeClient() clientset.Interface {
    return kl.kubeClient
}

// GetClusterDNS returns a list of the DNS servers, a list of the DNS search
// domains of the cluster, and a list of resolv.conf options.
func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, []string, bool, error) {
    var hostDNS, hostSearch, hostOptions []string
    // Get host DNS settings
    if kl.resolverConfig != "" {
        f, err := os.Open(kl.resolverConfig)
        if err != nil {
            return nil, nil, nil, false, err
        }
        defer f.Close()

        hostDNS, hostSearch, hostOptions, err = kl.parseResolvConf(f)
        if err != nil {
            return nil, nil, nil, false, err
        }
    }
    useClusterFirstPolicy := ((pod.Spec.DNSPolicy == v1.DNSClusterFirst && !kubecontainer.IsHostNetworkPod(pod)) || pod.Spec.DNSPolicy == v1.DNSClusterFirstWithHostNet)
    if useClusterFirstPolicy && len(kl.clusterDNS) == 0 {
        // clusterDNS is not known.
        // pod with DNSClusterFirst policy cannot be created
        kl.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy)
        log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod))
        kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", log)

        // fallback to DNSDefault
        useClusterFirstPolicy = false
    }

    if !useClusterFirstPolicy {
        // When the kubelet --resolv-conf flag is set to the empty string, use
        // DNS settings that override the docker default (which is to use
        // /etc/resolv.conf) and effectively disable DNS lookups. According to
        // the bind documentation, the behavior of the DNS client library when
        // "nameservers" are not specified is to "use the nameserver on the
        // local machine". A nameserver setting of localhost is equivalent to
        // this documented behavior.
        if kl.resolverConfig == "" {
            hostDNS = []string{"127.0.0.1"}
            hostSearch = []string{"."}
        } else {
            hostSearch = kl.formDNSSearchForDNSDefault(hostSearch, pod)
        }
        return hostDNS, hostSearch, hostOptions, useClusterFirstPolicy, nil
    }

    // for a pod with DNSClusterFirst policy, the cluster DNS server is the only nameserver configured for
    // the pod. The cluster DNS server itself will forward queries to other nameservers that it is configured
    // to use, in case the cluster DNS server cannot resolve the DNS query itself
    dns := make([]string, len(kl.clusterDNS))
    for i, ip := range kl.clusterDNS {
        dns[i] = ip.String()
    }
    dnsSearch := kl.formDNSSearch(hostSearch, pod)

    return dns, dnsSearch, hostOptions, useClusterFirstPolicy, nil
}

// syncPod is the transaction script for the sync of a single pod.
//
// Arguments:

@@ -2224,36 +2164,6 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str
    }
}

// Replace the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS
func (kl *Kubelet) setupDNSinContainerizedMounter(mounterPath string) {
    resolvePath := filepath.Join(strings.TrimSuffix(mounterPath, "/mounter"), "rootfs", "etc", "resolv.conf")
    dnsString := ""
    for _, dns := range kl.clusterDNS {
        dnsString = dnsString + fmt.Sprintf("nameserver %s\n", dns)
    }
    if kl.resolverConfig != "" {
        f, err := os.Open(kl.resolverConfig)
        defer f.Close()
        if err != nil {
            glog.Error("Could not open resolverConf file")
        } else {
            _, hostSearch, _, err := kl.parseResolvConf(f)
            if err != nil {
                glog.Errorf("Error parsing the resolv.conf file: %v", err)
            } else {
                dnsString = dnsString + "search"
                for _, search := range hostSearch {
                    dnsString = dnsString + fmt.Sprintf(" %s", search)
                }
                dnsString = dnsString + "\n"
            }
        }
    }
    if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil {
        glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err)
    }
}

// isSyncPodWorthy filters out events that are not worthy of pod syncing
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
    // ContainerRemoved doesn't affect pod state
pkg/kubelet/kubelet_network.go

@@ -21,13 +21,17 @@ import (
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/network"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

@@ -46,6 +50,77 @@ const (
    KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
)

// This just exports required functions from kubelet proper, for use by network
// plugins.
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
// the back channel is restricted to host-ports/testing, and restricted
// to kubenet. No other network plugin wrapper needs it. Other plugins
// only require a way to access namespace information, which they can do
// directly through the methods implemented by criNetworkHost.
type networkHost struct {
    kubelet *Kubelet
}

func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
    return nh.kubelet.GetPodByName(name, namespace)
}

func (nh *networkHost) GetKubeClient() clientset.Interface {
    return nh.kubelet.kubeClient
}

func (nh *networkHost) GetRuntime() kubecontainer.Runtime {
    return nh.kubelet.GetRuntime()
}

func (nh *networkHost) SupportsLegacyFeatures() bool {
    return true
}
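For orientation, here is a rough sketch of the host-interface shape these methods satisfy. The names LegacyHost, NamespaceGetter, and PortMappingGetter all appear in this diff, but the definitions below are reconstructed for illustration and are not copied from `pkg/kubelet/network`:

```go
package network

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// LegacyHost (sketch): the kubelet backchannel that networkHost provides to
// pre-CRI plugins such as kubenet (host ports, testing).
type LegacyHost interface {
	GetPodByName(namespace, name string) (*v1.Pod, bool)
	GetKubeClient() clientset.Interface
	GetRuntime() kubecontainer.Runtime
	SupportsLegacyFeatures() bool
}

// NamespaceGetter (sketch): the CRI-era part that criNetworkHost adds,
// mapping a container ID to its network namespace path.
type NamespaceGetter interface {
	GetNetNS(containerID string) (string, error)
}
```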
// criNetworkHost implements the part of network.Host required by the
// cri (NamespaceGetter). It leeches off networkHost for all other
// methods, because networkHost is slated for deletion.
type criNetworkHost struct {
    *networkHost
    // criNetworkHost currently supports legacy features. Hence no need to support PortMappingGetter
    *network.NoopPortMappingGetter
}

// GetNetNS returns the network namespace of the given containerID.
// This method satisfies the network.NamespaceGetter interface for
// networkHost. It's only meant to be used from network plugins
// that are directly invoked by the kubelet (aka: legacy, pre-cri).
// Any network plugin invoked by a cri must implement NamespaceGetter
// to talk directly to the runtime instead.
func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
    return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
}

// NoOpLegacyHost implements the network.LegacyHost interface for the remote
// runtime shim by just returning empties. It doesn't support legacy features
// like host port and bandwidth shaping.
type NoOpLegacyHost struct{}

// GetPodByName always returns "nil, true" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
    return nil, true
}

// GetKubeClient always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
    return nil
}

// GetRuntime always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
    return nil
}

// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool {
    return false
}
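If the sketched LegacyHost shape above is close to the real interface, the usual way to keep both implementations honest is a pair of compile-time assertions (hypothetical, shown for the idiom rather than taken from this PR):

```go
// Compile-time interface checks (hypothetical, written against the
// LegacyHost sketch above, not the real pkg/kubelet/network definition).
var _ LegacyHost = &networkHost{}
var _ LegacyHost = &NoOpLegacyHost{}
```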
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
func effectiveHairpinMode(hairpinMode kubeletconfig.HairpinMode, containerRuntime string, networkPlugin string) (kubeletconfig.HairpinMode, error) {

@@ -89,7 +164,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
    return supported
}

-func omitDuplicates(kl *Kubelet, pod *v1.Pod, combinedSearch []string) []string {
+func omitDuplicates(pod *v1.Pod, combinedSearch []string) []string {
    uniqueDomains := map[string]bool{}

    for _, dnsDomain := range combinedSearch {
@@ -101,7 +176,7 @@ func omitDuplicates(kl *Kubelet, pod *v1.Pod, combinedSearch []string) []string
    return combinedSearch[:len(uniqueDomains)]
}

-func formDNSSearchFitsLimits(kl *Kubelet, pod *v1.Pod, composedSearch []string) []string {
+func (kl *Kubelet) formDNSSearchFitsLimits(pod *v1.Pod, composedSearch []string) []string {
    // resolver file Search line current limitations
    resolvSearchLineDNSDomainsLimit := 6
    resolvSearchLineLenLimit := 255
@@ -137,12 +212,12 @@ func formDNSSearchFitsLimits(kl *Kubelet, pod *v1.Pod, composedSearch []string)
}

func (kl *Kubelet) formDNSSearchForDNSDefault(hostSearch []string, pod *v1.Pod) []string {
-    return formDNSSearchFitsLimits(kl, pod, hostSearch)
+    return kl.formDNSSearchFitsLimits(pod, hostSearch)
}

func (kl *Kubelet) formDNSSearch(hostSearch []string, pod *v1.Pod) []string {
    if kl.clusterDomain == "" {
-        formDNSSearchFitsLimits(kl, pod, hostSearch)
+        kl.formDNSSearchFitsLimits(pod, hostSearch)
        return hostSearch
    }

@@ -152,8 +227,8 @@ func (kl *Kubelet) formDNSSearch(hostSearch []string, pod *v1.Pod) []string {

    combinedSearch := append(dnsSearch, hostSearch...)

-    combinedSearch = omitDuplicates(kl, pod, combinedSearch)
-    return formDNSSearchFitsLimits(kl, pod, combinedSearch)
+    combinedSearch = omitDuplicates(pod, combinedSearch)
+    return kl.formDNSSearchFitsLimits(pod, combinedSearch)
}

func (kl *Kubelet) checkLimitsForResolvConf() {
@@ -246,6 +321,94 @@ func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, sear
    return nameservers, searches, options, nil
}
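The limits above (resolvSearchLineDNSDomainsLimit = 6, resolvSearchLineLenLimit = 255) mirror the classic glibc resolver restrictions on a resolv.conf search line. A self-contained sketch of that trimming logic, with hypothetical names (the real kubelet method additionally records an event when it has to truncate):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	maxDNSSearchDomains = 6   // resolv.conf "search" honors at most 6 domains (glibc)
	maxDNSSearchLineLen = 255 // and at most 255 characters in total
)

// fitSearchToLimits trims a DNS search list until it fits both limits,
// dropping domains from the end (lowest priority first).
func fitSearchToLimits(search []string) []string {
	if len(search) > maxDNSSearchDomains {
		search = search[:maxDNSSearchDomains]
	}
	for len(strings.Join(search, " ")) > maxDNSSearchLineLen {
		search = search[:len(search)-1]
	}
	return search
}

func main() {
	fmt.Println(fitSearchToLimits([]string{
		"ns.svc.cluster.local", "svc.cluster.local", "cluster.local",
		"a.example", "b.example", "c.example", "d.example",
	})) // the 7th domain is dropped
}
```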
// GetClusterDNS returns a list of the DNS servers, a list of the DNS search
// domains of the cluster, and a list of resolv.conf options.
func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, []string, bool, error) {
    var hostDNS, hostSearch, hostOptions []string
    // Get host DNS settings
    if kl.resolverConfig != "" {
        f, err := os.Open(kl.resolverConfig)
        if err != nil {
            return nil, nil, nil, false, err
        }
        defer f.Close()

        hostDNS, hostSearch, hostOptions, err = kl.parseResolvConf(f)
        if err != nil {
            return nil, nil, nil, false, err
        }
    }
    useClusterFirstPolicy := ((pod.Spec.DNSPolicy == v1.DNSClusterFirst && !kubecontainer.IsHostNetworkPod(pod)) || pod.Spec.DNSPolicy == v1.DNSClusterFirstWithHostNet)
    if useClusterFirstPolicy && len(kl.clusterDNS) == 0 {
        // clusterDNS is not known.
        // pod with DNSClusterFirst policy cannot be created
        kl.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy)
        log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod))
        kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", log)

        // fallback to DNSDefault
        useClusterFirstPolicy = false
    }

    if !useClusterFirstPolicy {
        // When the kubelet --resolv-conf flag is set to the empty string, use
        // DNS settings that override the docker default (which is to use
        // /etc/resolv.conf) and effectively disable DNS lookups. According to
        // the bind documentation, the behavior of the DNS client library when
        // "nameservers" are not specified is to "use the nameserver on the
        // local machine". A nameserver setting of localhost is equivalent to
        // this documented behavior.
        if kl.resolverConfig == "" {
            hostDNS = []string{"127.0.0.1"}
            hostSearch = []string{"."}
        } else {
            hostSearch = kl.formDNSSearchForDNSDefault(hostSearch, pod)
        }
        return hostDNS, hostSearch, hostOptions, useClusterFirstPolicy, nil
    }

    // for a pod with DNSClusterFirst policy, the cluster DNS server is the only nameserver configured for
    // the pod. The cluster DNS server itself will forward queries to other nameservers that it is configured
    // to use, in case the cluster DNS server cannot resolve the DNS query itself
    dns := make([]string, len(kl.clusterDNS))
    for i, ip := range kl.clusterDNS {
        dns[i] = ip.String()
    }
    dnsSearch := kl.formDNSSearch(hostSearch, pod)

    return dns, dnsSearch, hostOptions, useClusterFirstPolicy, nil
}
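The policy decision at the heart of GetClusterDNS can be read as a small predicate. A standalone sketch (hostNetwork stands in for `kubecontainer.IsHostNetworkPod(pod)`; the fallback-to-DNSDefault event recording is omitted):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// useClusterFirst reduces GetClusterDNS's policy decision to a predicate:
// cluster-first DNS applies to DNSClusterFirst pods off the host network,
// and to DNSClusterFirstWithHostNet pods unconditionally, but only when a
// cluster DNS IP is actually configured.
func useClusterFirst(policy v1.DNSPolicy, hostNetwork, clusterDNSConfigured bool) bool {
	clusterFirst := (policy == v1.DNSClusterFirst && !hostNetwork) ||
		policy == v1.DNSClusterFirstWithHostNet
	return clusterFirst && clusterDNSConfigured
}

func main() {
	fmt.Println(useClusterFirst(v1.DNSClusterFirst, false, true))            // true
	fmt.Println(useClusterFirst(v1.DNSClusterFirst, true, true))             // false: host-network pod
	fmt.Println(useClusterFirst(v1.DNSClusterFirstWithHostNet, true, true))  // true
	fmt.Println(useClusterFirst(v1.DNSClusterFirst, false, false))           // false: falls back to DNSDefault
}
```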
// Replace the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS
func (kl *Kubelet) setupDNSinContainerizedMounter(mounterPath string) {
    resolvePath := filepath.Join(strings.TrimSuffix(mounterPath, "/mounter"), "rootfs", "etc", "resolv.conf")
    dnsString := ""
    for _, dns := range kl.clusterDNS {
        dnsString = dnsString + fmt.Sprintf("nameserver %s\n", dns)
    }
    if kl.resolverConfig != "" {
        f, err := os.Open(kl.resolverConfig)
        defer f.Close()
        if err != nil {
            glog.Error("Could not open resolverConf file")
        } else {
            _, hostSearch, _, err := kl.parseResolvConf(f)
            if err != nil {
                glog.Errorf("Error parsing the resolv.conf file: %v", err)
            } else {
                dnsString = dnsString + "search"
                for _, search := range hostSearch {
                    dnsString = dnsString + fmt.Sprintf(" %s", search)
                }
                dnsString = dnsString + "\n"
            }
        }
    }
    if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil {
        glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err)
    }
}
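To make the string building concrete: with a hypothetical cluster DNS IP of 10.0.0.10 and a host resolv.conf whose search line reads `example.com corp.example.com`, the function above would write a rootfs/etc/resolv.conf of:

```
nameserver 10.0.0.10
search example.com corp.example.com
```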
// syncNetworkStatus updates the network state
func (kl *Kubelet) syncNetworkStatus() {
    // For cri integration, network state will be updated in updateRuntimeUp,
pkg/kubelet/kubelet_network_test.go

@@ -28,6 +28,76 @@ import (
    "k8s.io/client-go/tools/record"
)

func TestNetworkHostGetsPodNotFound(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    actualPod, _ := nh.GetPodByName("", "")
    if actualPod != nil {
        t.Fatalf("Expected nil, received %v instead", actualPod)
    }
}

func TestNetworkHostGetsKubeClient(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.GetKubeClient() != testKubelet.fakeKubeClient {
        t.Fatalf("NetworkHost client does not match testKubelet's client")
    }
}

func TestNetworkHostGetsRuntime(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.GetRuntime() != testKubelet.fakeRuntime {
        t.Fatalf("NetworkHost runtime does not match testKubelet's runtime")
    }
}

func TestNetworkHostSupportsLegacyFeatures(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.SupportsLegacyFeatures() == false {
        t.Fatalf("SupportsLegacyFeatures should not be false")
    }
}

func TestNoOpHostGetsName(t *testing.T) {
    nh := NoOpLegacyHost{}
    pod, found := nh.GetPodByName("", "")
    if pod != nil || !found {
        t.Fatalf("NoOpLegacyHost.GetPodByName expected to return nil, true")
    }
}

func TestNoOpHostGetsKubeClient(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.GetKubeClient() != nil {
        t.Fatalf("NoOpLegacyHost client expected to be nil")
    }
}

func TestNoOpHostGetsRuntime(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.GetRuntime() != nil {
        t.Fatalf("NoOpLegacyHost runtime expected to be nil")
    }
}

func TestNoOpHostSupportsLegacyFeatures(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.SupportsLegacyFeatures() != false {
        t.Fatalf("NoOpLegacyHost legacy features expected to be false")
    }
}
func TestNodeIPParam(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()
@@ -183,6 +253,93 @@ func TestComposeDNSSearch(t *testing.T) {
    }
}
func TestGetClusterDNS(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()
    kubelet := testKubelet.kubelet

    clusterNS := "203.0.113.1"
    kubelet.clusterDomain = "kubernetes.io"
    kubelet.clusterDNS = []net.IP{net.ParseIP(clusterNS)}

    pods := newTestPods(4)
    pods[0].Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet
    pods[1].Spec.DNSPolicy = v1.DNSClusterFirst
    pods[2].Spec.DNSPolicy = v1.DNSClusterFirst
    pods[2].Spec.HostNetwork = false
    pods[3].Spec.DNSPolicy = v1.DNSDefault

    options := make([]struct {
        DNS       []string
        DNSSearch []string
    }, 4)
    for i, pod := range pods {
        var err error
        options[i].DNS, options[i].DNSSearch, _, _, err = kubelet.GetClusterDNS(pod)
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
    }
    if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
    if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
        t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
    }
    if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
        t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
    }
    if len(options[2].DNS) != 1 || options[2].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %+v", clusterNS, options[2].DNS)
    }
    if len(options[2].DNSSearch) == 0 || options[2].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[2].DNSSearch)
    }
    if len(options[3].DNS) != 1 || options[3].DNS[0] != "127.0.0.1" {
        t.Errorf("expected nameserver 127.0.0.1, got %+v", options[3].DNS)
    }
    if len(options[3].DNSSearch) != 1 || options[3].DNSSearch[0] != "." {
        t.Errorf("expected search \".\", got %+v", options[3].DNSSearch)
    }

    kubelet.resolverConfig = "/etc/resolv.conf"
    for i, pod := range pods {
        var err error
        options[i].DNS, options[i].DNSSearch, _, _, err = kubelet.GetClusterDNS(pod)
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    t.Logf("nameservers %+v", options[1].DNS)
    if len(options[0].DNS) != 1 {
        t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
    } else if options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
    }
    expLength := len(options[1].DNSSearch) + 3
    if expLength > 6 {
        expLength = 6
    }
    if len(options[0].DNSSearch) != expLength {
        t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
    } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
    if len(options[2].DNS) != 1 {
        t.Errorf("expected cluster nameserver only, got %+v", options[2].DNS)
    } else if options[2].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %v", clusterNS, options[2].DNS[0])
    }
    if len(options[2].DNSSearch) != expLength {
        t.Errorf("expected prepend of cluster domain, got %+v", options[2].DNSSearch)
    } else if options[2].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[2].DNSSearch)
    }
}
func TestGetIPTablesMark(t *testing.T) {
    tests := []struct {
        bit int
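The test table is cut off here, but the helper it exercises turns a mark bit into an iptables value/mask string. A hypothetical sketch of that computation (the helper name and exact upstream formatting are assumptions):

```go
package main

import "fmt"

// iptablesMark renders a single mark bit as an iptables "value/mask"
// match string, e.g. bit 14 -> "0x004000/0x004000".
func iptablesMark(bit int) string {
	value := 1 << uint(bit)
	return fmt.Sprintf("%#08x/%#08x", value, value)
}

func main() {
	fmt.Println(iptablesMark(14)) // 0x004000/0x004000
	fmt.Println(iptablesMark(15)) // 0x008000/0x008000
}
```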
pkg/kubelet/kubelet_test.go

@@ -19,7 +19,6 @@ package kubelet

import (
    "fmt"
    "io/ioutil"
    "net"
    "os"
    "sort"
    "testing"

@@ -2171,90 +2170,3 @@ type podsByUID []*v1.Pod

func (p podsByUID) Len() int           { return len(p) }
func (p podsByUID) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }

func TestGetClusterDNS(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()
    kubelet := testKubelet.kubelet

    clusterNS := "203.0.113.1"
    kubelet.clusterDomain = "kubernetes.io"
    kubelet.clusterDNS = []net.IP{net.ParseIP(clusterNS)}

    pods := newTestPods(4)
    pods[0].Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet
    pods[1].Spec.DNSPolicy = v1.DNSClusterFirst
    pods[2].Spec.DNSPolicy = v1.DNSClusterFirst
    pods[2].Spec.HostNetwork = false
    pods[3].Spec.DNSPolicy = v1.DNSDefault

    options := make([]struct {
        DNS       []string
        DNSSearch []string
    }, 4)
    for i, pod := range pods {
        var err error
        options[i].DNS, options[i].DNSSearch, _, _, err = kubelet.GetClusterDNS(pod)
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
    }
    if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
    if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
        t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
    }
    if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
        t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
    }
    if len(options[2].DNS) != 1 || options[2].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %+v", clusterNS, options[2].DNS)
    }
    if len(options[2].DNSSearch) == 0 || options[2].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[2].DNSSearch)
    }
    if len(options[3].DNS) != 1 || options[3].DNS[0] != "127.0.0.1" {
        t.Errorf("expected nameserver 127.0.0.1, got %+v", options[3].DNS)
    }
    if len(options[3].DNSSearch) != 1 || options[3].DNSSearch[0] != "." {
        t.Errorf("expected search \".\", got %+v", options[3].DNSSearch)
    }

    kubelet.resolverConfig = "/etc/resolv.conf"
    for i, pod := range pods {
        var err error
        options[i].DNS, options[i].DNSSearch, _, _, err = kubelet.GetClusterDNS(pod)
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    t.Logf("nameservers %+v", options[1].DNS)
    if len(options[0].DNS) != 1 {
        t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
    } else if options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
    }
    expLength := len(options[1].DNSSearch) + 3
    if expLength > 6 {
        expLength = 6
    }
    if len(options[0].DNSSearch) != expLength {
        t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
    } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
    if len(options[2].DNS) != 1 {
        t.Errorf("expected cluster nameserver only, got %+v", options[2].DNS)
    } else if options[2].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %v", clusterNS, options[2].DNS[0])
    }
    if len(options[2].DNSSearch) != expLength {
        t.Errorf("expected prepend of cluster domain, got %+v", options[2].DNSSearch)
    } else if options[2].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[2].DNSSearch)
    }
}
pkg/kubelet/networks.go (deleted)

@@ -1,95 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/network"
)

// This just exports required functions from kubelet proper, for use by network
// plugins.
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
// the back channel is restricted to host-ports/testing, and restricted
// to kubenet. No other network plugin wrapper needs it. Other plugins
// only require a way to access namespace information, which they can do
// directly through the methods implemented by criNetworkHost.
type networkHost struct {
    kubelet *Kubelet
}

func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
    return nh.kubelet.GetPodByName(name, namespace)
}

func (nh *networkHost) GetKubeClient() clientset.Interface {
    return nh.kubelet.kubeClient
}

func (nh *networkHost) GetRuntime() kubecontainer.Runtime {
    return nh.kubelet.GetRuntime()
}

func (nh *networkHost) SupportsLegacyFeatures() bool {
    return true
}

// criNetworkHost implements the part of network.Host required by the
// cri (NamespaceGetter). It leeches off networkHost for all other
// methods, because networkHost is slated for deletion.
type criNetworkHost struct {
    *networkHost
    // criNetworkHost currently supports legacy features. Hence no need to support PortMappingGetter
    *network.NoopPortMappingGetter
}

// GetNetNS returns the network namespace of the given containerID.
// This method satisfies the network.NamespaceGetter interface for
// networkHost. It's only meant to be used from network plugins
// that are directly invoked by the kubelet (aka: legacy, pre-cri).
// Any network plugin invoked by a cri must implement NamespaceGetter
// to talk directly to the runtime instead.
func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
    return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
}

// NoOpLegacyHost implements the network.LegacyHost interface for the remote
// runtime shim by just returning empties. It doesn't support legacy features
// like host port and bandwidth shaping.
type NoOpLegacyHost struct{}

// GetPodByName always returns "nil, true" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
    return nil, true
}

// GetKubeClient always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
    return nil
}

// GetRuntime always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
    return nil
}

// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool {
    return false
}
pkg/kubelet/networks_test.go (deleted)

@@ -1,91 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "testing"
)

func TestNetworkHostGetsPodNotFound(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    actualPod, _ := nh.GetPodByName("", "")
    if actualPod != nil {
        t.Fatalf("Expected nil, received %v instead", actualPod)
    }
}

func TestNetworkHostGetsKubeClient(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.GetKubeClient() != testKubelet.fakeKubeClient {
        t.Fatalf("NetworkHost client does not match testKubelet's client")
    }
}

func TestNetworkHostGetsRuntime(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.GetRuntime() != testKubelet.fakeRuntime {
        t.Fatalf("NetworkHost runtime does not match testKubelet's runtime")
    }
}

func TestNetworkHostSupportsLegacyFeatures(t *testing.T) {
    testKubelet := newTestKubelet(t, true)
    defer testKubelet.Cleanup()
    nh := networkHost{testKubelet.kubelet}

    if nh.SupportsLegacyFeatures() == false {
        t.Fatalf("SupportsLegacyFeatures should not be false")
    }
}

func TestNoOpHostGetsName(t *testing.T) {
    nh := NoOpLegacyHost{}
    pod, found := nh.GetPodByName("", "")
    if pod != nil || !found {
        t.Fatalf("NoOpLegacyHost.GetPodByName expected to return nil, true")
    }
}

func TestNoOpHostGetsKubeClient(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.GetKubeClient() != nil {
        t.Fatalf("NoOpLegacyHost client expected to be nil")
    }
}

func TestNoOpHostGetsRuntime(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.GetRuntime() != nil {
        t.Fatalf("NoOpLegacyHost runtime expected to be nil")
    }
}

func TestNoOpHostSupportsLegacyFeatures(t *testing.T) {
    nh := NoOpLegacyHost{}
    if nh.SupportsLegacyFeatures() != false {
        t.Fatalf("NoOpLegacyHost legacy features expected to be false")
    }
}