Extract kubelet network code into its own file

commit 5c836f3582 (parent 5f2460b58c)
pkg/kubelet/kubelet.go
@@ -545,40 +545,6 @@ func NewMainKubelet(
 	return klet, nil
 }
-
-// effectiveHairpinMode determines the effective hairpin mode given the
-// configured mode, container runtime, and whether cbr0 should be configured.
-func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) {
-	// The hairpin mode setting doesn't matter if:
-	// - We're not using a bridge network. This is hard to check because we might
-	//   be using a plugin. It matters if --configure-cbr0=true, and we currently
-	//   don't pipe it down to any plugins.
-	// - It's set to hairpin-veth for a container runtime that doesn't know how
-	//   to set the hairpin flag on the veths of containers. Currently the
-	//   docker runtime is the only one that understands this.
-	// - It's set to "none".
-	if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth {
-		// Only on docker.
-		if containerRuntime != "docker" {
-			glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
-			return componentconfig.HairpinNone, nil
-		}
-		if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" {
-			// This is not a valid combination. Users might be using the
-			// default values (from before the hairpin-mode flag existed) and we
-			// should keep the old behavior.
-			glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
-			return componentconfig.HairpinVeth, nil
-		}
-	} else if hairpinMode == componentconfig.HairpinNone {
-		if configureCBR0 {
-			glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode)
-		}
-	} else {
-		return "", fmt.Errorf("unknown value: %q", hairpinMode)
-	}
-	return hairpinMode, nil
-}
 
 type serviceLister interface {
 	List() (api.ServiceList, error)
 }
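The decision table above is easier to see stripped of the logging and the componentconfig types. A minimal standalone sketch, assuming plain strings in place of the HairpinMode constants (the string values shown match the constants' values, but this helper is illustrative, not kubelet code):

package main

import "fmt"

// effectiveMode mirrors the decision logic of effectiveHairpinMode,
// with plain strings standing in for componentconfig.HairpinMode.
func effectiveMode(mode, runtime string, configureCBR0 bool, plugin string) (string, error) {
	switch mode {
	case "promiscuous-bridge", "hairpin-veth":
		// Only the docker runtime knows how to set the hairpin flag.
		if runtime != "docker" {
			return "none", nil
		}
		// promiscuous-bridge with no bridge to be promiscuous on:
		// fall back to the pre-flag behavior.
		if mode == "promiscuous-bridge" && !configureCBR0 && plugin != "kubenet" {
			return "hairpin-veth", nil
		}
		return mode, nil
	case "none":
		return mode, nil
	default:
		return "", fmt.Errorf("unknown value: %q", mode)
	}
}

func main() {
	m, _ := effectiveMode("promiscuous-bridge", "rkt", true, "")
	fmt.Println(m) // "none": non-docker runtimes cannot honor hairpin settings
}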
@@ -853,39 +819,6 @@ type Kubelet struct {
 	enableControllerAttachDetach bool
 }
-
-// Validate given node IP belongs to the current host
-func (kl *Kubelet) validateNodeIP() error {
-	if kl.nodeIP == nil {
-		return nil
-	}
-
-	// Honor IP limitations set in setNodeStatus()
-	if kl.nodeIP.IsLoopback() {
-		return fmt.Errorf("nodeIP can't be loopback address")
-	}
-	if kl.nodeIP.To4() == nil {
-		return fmt.Errorf("nodeIP must be IPv4 address")
-	}
-
-	addrs, err := net.InterfaceAddrs()
-	if err != nil {
-		return err
-	}
-	for _, addr := range addrs {
-		var ip net.IP
-		switch v := addr.(type) {
-		case *net.IPNet:
-			ip = v.IP
-		case *net.IPAddr:
-			ip = v.IP
-		}
-		if ip != nil && ip.Equal(kl.nodeIP) {
-			return nil
-		}
-	}
-	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String())
-}
 
 // dirExists returns true if the path exists and represents a directory.
 func dirExists(path string) bool {
 	s, err := os.Stat(path)
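The interface walk in validateNodeIP is plain standard-library code. A self-contained sketch of the same check (ipOnHost is a hypothetical helper name, not part of the kubelet):

package main

import (
	"fmt"
	"net"
)

// ipOnHost reports whether ip is assigned to any local interface,
// using the same net.InterfaceAddrs() walk validateNodeIP performs.
func ipOnHost(ip net.IP) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		var candidate net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			candidate = v.IP
		case *net.IPAddr:
			candidate = v.IP
		}
		if candidate != nil && candidate.Equal(ip) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := ipOnHost(net.ParseIP("127.0.0.1"))
	fmt.Println(ok, err) // loopback is assigned, but validateNodeIP rejects it earlier
}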
@@ -1141,18 +1074,6 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
 	return node, nil
 }
-
-func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
-	// TODO: We should have a mechanism to say whether native cloud provider
-	// is used or whether we are using overlay networking. We should return
-	// true for cloud providers if they implement Routes() interface and
-	// we are not using overlay networking.
-	if kl.cloud == nil || kl.cloud.ProviderName() != "gce" || kl.flannelExperimentalOverlay {
-		return false
-	}
-	_, supported := kl.cloud.Routes()
-	return supported
-}
 
 // registerWithApiserver registers the node with the cluster master. It is safe
 // to call multiple times, but not concurrently (kl.registrationCompleted is
 // not locked).
@@ -1718,61 +1639,6 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
 	return dns, dnsSearch, nil
 }
-
-// Returns the list of DNS servers and DNS search domains.
-func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, searches []string, err error) {
-	var scrubber dnsScrubber
-	if kl.cloud != nil {
-		scrubber = kl.cloud
-	}
-	return parseResolvConf(reader, scrubber)
-}
-
-// A helper for testing.
-type dnsScrubber interface {
-	ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
-}
-
-// parseResolvConf reads a resolv.conf file from the given reader, and parses
-// it into nameservers and searches, possibly returning an error. The given
-// dnsScrubber allows cloud providers to post-process DNS names.
-// TODO: move to utility package
-func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []string, searches []string, err error) {
-	file, err := ioutil.ReadAll(reader)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Lines of the form "nameserver 1.2.3.4" accumulate.
-	nameservers = []string{}
-
-	// Lines of the form "search example.com" overrule - last one wins.
-	searches = []string{}
-
-	lines := strings.Split(string(file), "\n")
-	for l := range lines {
-		trimmed := strings.TrimSpace(lines[l])
-		if strings.HasPrefix(trimmed, "#") {
-			continue
-		}
-		fields := strings.Fields(trimmed)
-		if len(fields) == 0 {
-			continue
-		}
-		if fields[0] == "nameserver" {
-			nameservers = append(nameservers, fields[1:]...)
-		}
-		if fields[0] == "search" {
-			searches = fields[1:]
-		}
-	}
-
-	// Give the cloud-provider a chance to post-process DNS settings.
-	if dnsScrubber != nil {
-		nameservers, searches = dnsScrubber.ScrubDNS(nameservers, searches)
-	}
-	return nameservers, searches, nil
-}
 
 // One of the following arguments must be non-nil: runningPod, status.
 // TODO: Modify containerRuntime.KillPod() to accept the right arguments.
 func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
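The parsing rules above are: "nameserver" entries accumulate across lines, the last "search" line wins, and "#" comments are skipped. A runnable distillation (a standalone sketch, not the kubelet function; it drops the error return and the scrubber hook):

package main

import (
	"fmt"
	"strings"
)

// parse restates parseResolvConf's core rules on an in-memory string.
func parse(conf string) (nameservers, searches []string) {
	for _, line := range strings.Split(conf, "\n") {
		fields := strings.Fields(strings.TrimSpace(line))
		if len(fields) == 0 || strings.HasPrefix(fields[0], "#") {
			continue
		}
		switch fields[0] {
		case "nameserver":
			nameservers = append(nameservers, fields[1:]...) // accumulate
		case "search":
			searches = fields[1:] // last one wins
		}
	}
	return
}

func main() {
	ns, srch := parse("nameserver 1.2.3.4\nsearch foo\nsearch bar baz\n")
	fmt.Println(ns, srch) // [1.2.3.4] [bar baz]
}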
@@ -2039,51 +1905,6 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(
 	return utilerrors.NewAggregate(errlist)
 }
-
-// cleanupBandwidthLimits updates the status of bandwidth-limited containers
-// and ensures that only the appropriate CIDRs are active on the node.
-func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
-	if kl.shaper == nil {
-		return nil
-	}
-	currentCIDRs, err := kl.shaper.GetCIDRs()
-	if err != nil {
-		return err
-	}
-	possibleCIDRs := sets.String{}
-	for ix := range allPods {
-		pod := allPods[ix]
-		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
-		if err != nil {
-			return err
-		}
-		if ingress == nil && egress == nil {
-			glog.V(8).Infof("Not a bandwidth limited container...")
-			continue
-		}
-		status, found := kl.statusManager.GetPodStatus(pod.UID)
-		if !found {
-			// TODO(random-liu): Cleanup status get functions. (issue #20477)
-			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
-			if err != nil {
-				return err
-			}
-			status = kl.generateAPIPodStatus(pod, s)
-		}
-		if status.Phase == api.PodRunning {
-			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
-		}
-	}
-	for _, cidr := range currentCIDRs {
-		if !possibleCIDRs.Has(cidr) {
-			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
-			if err := kl.shaper.Reset(cidr); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
 
 // Get pods which should be resynchronized. Currently, the following pod should be resynchronized:
 //   * pod whose work is ready.
 //   * internal modules that request sync of a pod.
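At its core the cleanup above is a set difference: any CIDR the shaper currently limits that no running bandwidth-limited pod accounts for gets reset. A reduced sketch (staleCIDRs is an illustrative helper using a plain map where the original uses sets.String):

package main

import "fmt"

// staleCIDRs returns the entries in current that are not backed by a
// running bandwidth-limited pod, i.e. the candidates for shaper.Reset.
func staleCIDRs(current []string, active map[string]bool) []string {
	var stale []string
	for _, cidr := range current {
		if !active[cidr] {
			stale = append(stale, cidr)
		}
	}
	return stale
}

func main() {
	active := map[string]bool{"1.2.3.4/32": true}
	fmt.Println(staleCIDRs([]string{"1.2.3.4/32", "2.3.4.5/32"}, active))
	// [2.3.4.5/32]
}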
@@ -2820,33 +2641,6 @@ func (kl *Kubelet) updateRuntimeUp() {
 	kl.runtimeState.setRuntimeSync(kl.clock.Now())
 }
-
-// TODO: remove when kubenet plugin is ready
-// NOTE!!! if you make changes here, also make them to kubenet
-func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
-	if podCIDR == "" {
-		glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
-		return nil
-	}
-	glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
-	_, cidr, err := net.ParseCIDR(podCIDR)
-	if err != nil {
-		return err
-	}
-	// Set cbr0 interface address to first address in IPNet
-	cidr.IP.To4()[3] += 1
-	if err := ensureCbr0(cidr, kl.hairpinMode == componentconfig.PromiscuousBridge, kl.babysitDaemons); err != nil {
-		return err
-	}
-	if kl.shapingEnabled() {
-		if kl.shaper == nil {
-			glog.V(5).Info("Shaper is nil, creating")
-			kl.shaper = bandwidth.NewTCShaper("cbr0")
-		}
-		return kl.shaper.ReconcileInterface()
-	}
-	return nil
-}
 
 // updateNodeStatus updates node status to master with retries.
 func (kl *Kubelet) updateNodeStatus() error {
 	for i := 0; i < nodeStatusUpdateRetry; i++ {
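The line cidr.IP.To4()[3] += 1 derives the bridge address by bumping the last octet of the parsed network address in place. This works because To4() aliases the underlying 4-byte slice, and it assumes an IPv4 pod CIDR whose network address does not end in 255 (e.g. the per-node /24s the kubelet hands out). A standalone demonstration with an illustrative CIDR:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Take the first usable address of the pod CIDR for cbr0,
	// the same in-place mutation reconcileCBR0 performs.
	_, cidr, err := net.ParseCIDR("10.244.1.0/24")
	if err != nil {
		panic(err)
	}
	cidr.IP.To4()[3] += 1
	fmt.Println(cidr) // 10.244.1.1/24
}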
@@ -2868,45 +2662,6 @@ func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) {
 	kl.recorder.Eventf(kl.nodeRef, eventtype, event, "Node %s status is now: %s", kl.nodeName, event)
 }
-
-// syncNetworkStatus updates the network state, ensuring that the network is
-// configured correctly if the kubelet is set to configure cbr0:
-//   * handshake flannel helper if the flannel experimental overlay is being used.
-//   * ensure that iptables masq rules are setup
-//   * reconcile cbr0 with the pod CIDR
-func (kl *Kubelet) syncNetworkStatus() {
-	var err error
-	if kl.configureCBR0 {
-		if kl.flannelExperimentalOverlay {
-			podCIDR, err := kl.flannelHelper.Handshake()
-			if err != nil {
-				glog.Infof("Flannel server handshake failed %v", err)
-				return
-			}
-			kl.updatePodCIDR(podCIDR)
-		}
-		if err := ensureIPTablesMasqRule(kl.nonMasqueradeCIDR); err != nil {
-			err = fmt.Errorf("Error on adding ip table rules: %v", err)
-			glog.Error(err)
-			kl.runtimeState.setNetworkState(err)
-			return
-		}
-		podCIDR := kl.runtimeState.podCIDR()
-		if len(podCIDR) == 0 {
-			err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set. Will not configure CBR0 right now")
-			glog.Warning(err)
-		} else if err = kl.reconcileCBR0(podCIDR); err != nil {
-			err = fmt.Errorf("Error configuring cbr0: %v", err)
-			glog.Error(err)
-		}
-		if err != nil {
-			kl.runtimeState.setNetworkState(err)
-			return
-		}
-	}
-
-	kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
-}
 
 // Set addresses for the node.
 func (kl *Kubelet) setNodeAddress(node *api.Node) error {
 	// Set addresses for the node.
@@ -3826,29 +3581,3 @@ func (kl *Kubelet) ListenAndServe(address net.IP, port uint, tlsOptions *server.
 func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
 	server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port, kl.containerRuntime)
 }
-
-// updatePodCIDR updates the pod CIDR in the runtime state if it is different
-// from the current CIDR.
-func (kl *Kubelet) updatePodCIDR(cidr string) {
-	if kl.runtimeState.podCIDR() == cidr {
-		return
-	}
-
-	glog.Infof("Setting Pod CIDR: %v -> %v", kl.runtimeState.podCIDR(), cidr)
-	kl.runtimeState.setPodCIDR(cidr)
-
-	if kl.networkPlugin != nil {
-		details := make(map[string]interface{})
-		details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = cidr
-		kl.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
-	}
-}
-
-// shapingEnabled returns whether traffic shaping is enabled.
-func (kl *Kubelet) shapingEnabled() bool {
-	// Disable shaping if a network plugin is defined and supports shaping
-	if kl.networkPlugin != nil && kl.networkPlugin.Capabilities().Has(network.NET_PLUGIN_CAPABILITY_SHAPING) {
-		return false
-	}
-	return true
-}
pkg/kubelet/kubelet_network.go (new file, 305 lines)
@@ -0,0 +1,305 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	"k8s.io/kubernetes/pkg/kubelet/network"
+	"k8s.io/kubernetes/pkg/util/bandwidth"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+// effectiveHairpinMode determines the effective hairpin mode given the
+// configured mode, container runtime, and whether cbr0 should be configured.
+func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) {
+	// The hairpin mode setting doesn't matter if:
+	// - We're not using a bridge network. This is hard to check because we might
+	//   be using a plugin. It matters if --configure-cbr0=true, and we currently
+	//   don't pipe it down to any plugins.
+	// - It's set to hairpin-veth for a container runtime that doesn't know how
+	//   to set the hairpin flag on the veths of containers. Currently the
+	//   docker runtime is the only one that understands this.
+	// - It's set to "none".
+	if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth {
+		// Only on docker.
+		if containerRuntime != "docker" {
+			glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
+			return componentconfig.HairpinNone, nil
+		}
+		if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" {
+			// This is not a valid combination. Users might be using the
+			// default values (from before the hairpin-mode flag existed) and we
+			// should keep the old behavior.
+			glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
+			return componentconfig.HairpinVeth, nil
+		}
+	} else if hairpinMode == componentconfig.HairpinNone {
+		if configureCBR0 {
+			glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode)
+		}
+	} else {
+		return "", fmt.Errorf("unknown value: %q", hairpinMode)
+	}
+	return hairpinMode, nil
+}
+
+// Validate given node IP belongs to the current host
+func (kl *Kubelet) validateNodeIP() error {
+	if kl.nodeIP == nil {
+		return nil
+	}
+
+	// Honor IP limitations set in setNodeStatus()
+	if kl.nodeIP.IsLoopback() {
+		return fmt.Errorf("nodeIP can't be loopback address")
+	}
+	if kl.nodeIP.To4() == nil {
+		return fmt.Errorf("nodeIP must be IPv4 address")
+	}
+
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return err
+	}
+	for _, addr := range addrs {
+		var ip net.IP
+		switch v := addr.(type) {
+		case *net.IPNet:
+			ip = v.IP
+		case *net.IPAddr:
+			ip = v.IP
+		}
+		if ip != nil && ip.Equal(kl.nodeIP) {
+			return nil
+		}
+	}
+	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String())
+}
+
+// providerRequiresNetworkingConfiguration returns whether the cloud provider
+// requires special networking configuration.
+func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
+	// TODO: We should have a mechanism to say whether native cloud provider
+	// is used or whether we are using overlay networking. We should return
+	// true for cloud providers if they implement Routes() interface and
+	// we are not using overlay networking.
+	if kl.cloud == nil || kl.cloud.ProviderName() != "gce" || kl.flannelExperimentalOverlay {
+		return false
+	}
+	_, supported := kl.cloud.Routes()
+	return supported
+}
+
+// Returns the list of DNS servers and DNS search domains.
+func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, searches []string, err error) {
+	var scrubber dnsScrubber
+	if kl.cloud != nil {
+		scrubber = kl.cloud
+	}
+	return parseResolvConf(reader, scrubber)
+}
+
+// A helper for testing.
+type dnsScrubber interface {
+	ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
+}
+
+// parseResolvConf reads a resolv.conf file from the given reader, and parses
+// it into nameservers and searches, possibly returning an error. The given
+// dnsScrubber allows cloud providers to post-process DNS names.
+// TODO: move to utility package
+func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []string, searches []string, err error) {
+	file, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Lines of the form "nameserver 1.2.3.4" accumulate.
+	nameservers = []string{}
+
+	// Lines of the form "search example.com" overrule - last one wins.
+	searches = []string{}
+
+	lines := strings.Split(string(file), "\n")
+	for l := range lines {
+		trimmed := strings.TrimSpace(lines[l])
+		if strings.HasPrefix(trimmed, "#") {
+			continue
+		}
+		fields := strings.Fields(trimmed)
+		if len(fields) == 0 {
+			continue
+		}
+		if fields[0] == "nameserver" {
+			nameservers = append(nameservers, fields[1:]...)
+		}
+		if fields[0] == "search" {
+			searches = fields[1:]
+		}
+	}
+
+	// Give the cloud-provider a chance to post-process DNS settings.
+	if dnsScrubber != nil {
+		nameservers, searches = dnsScrubber.ScrubDNS(nameservers, searches)
+	}
+	return nameservers, searches, nil
+}
+
+// cleanupBandwidthLimits updates the status of bandwidth-limited containers
+// and ensures that only the appropriate CIDRs are active on the node.
+func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
+	if kl.shaper == nil {
+		return nil
+	}
+	currentCIDRs, err := kl.shaper.GetCIDRs()
+	if err != nil {
+		return err
+	}
+	possibleCIDRs := sets.String{}
+	for ix := range allPods {
+		pod := allPods[ix]
+		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
+		if err != nil {
+			return err
+		}
+		if ingress == nil && egress == nil {
+			glog.V(8).Infof("Not a bandwidth limited container...")
+			continue
+		}
+		status, found := kl.statusManager.GetPodStatus(pod.UID)
+		if !found {
+			// TODO(random-liu): Cleanup status get functions. (issue #20477)
+			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
+			if err != nil {
+				return err
+			}
+			status = kl.generateAPIPodStatus(pod, s)
+		}
+		if status.Phase == api.PodRunning {
+			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
+		}
+	}
+	for _, cidr := range currentCIDRs {
+		if !possibleCIDRs.Has(cidr) {
+			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
+			if err := kl.shaper.Reset(cidr); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// TODO: remove when kubenet plugin is ready
+// NOTE!!! if you make changes here, also make them to kubenet
+func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
+	if podCIDR == "" {
+		glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
+		return nil
+	}
+	glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
+	_, cidr, err := net.ParseCIDR(podCIDR)
+	if err != nil {
+		return err
+	}
+	// Set cbr0 interface address to first address in IPNet
+	cidr.IP.To4()[3] += 1
+	if err := ensureCbr0(cidr, kl.hairpinMode == componentconfig.PromiscuousBridge, kl.babysitDaemons); err != nil {
+		return err
+	}
+	if kl.shapingEnabled() {
+		if kl.shaper == nil {
+			glog.V(5).Info("Shaper is nil, creating")
+			kl.shaper = bandwidth.NewTCShaper("cbr0")
+		}
+		return kl.shaper.ReconcileInterface()
+	}
+	return nil
+}
+
+// syncNetworkStatus updates the network state, ensuring that the network is
+// configured correctly if the kubelet is set to configure cbr0:
+//   * handshake flannel helper if the flannel experimental overlay is being used.
+//   * ensure that iptables masq rules are setup
+//   * reconcile cbr0 with the pod CIDR
+func (kl *Kubelet) syncNetworkStatus() {
+	var err error
+	if kl.configureCBR0 {
+		if kl.flannelExperimentalOverlay {
+			podCIDR, err := kl.flannelHelper.Handshake()
+			if err != nil {
+				glog.Infof("Flannel server handshake failed %v", err)
+				return
+			}
+			kl.updatePodCIDR(podCIDR)
+		}
+		if err := ensureIPTablesMasqRule(kl.nonMasqueradeCIDR); err != nil {
+			err = fmt.Errorf("Error on adding ip table rules: %v", err)
+			glog.Error(err)
+			kl.runtimeState.setNetworkState(err)
+			return
+		}
+		podCIDR := kl.runtimeState.podCIDR()
+		if len(podCIDR) == 0 {
+			err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set. Will not configure CBR0 right now")
+			glog.Warning(err)
+		} else if err = kl.reconcileCBR0(podCIDR); err != nil {
+			err = fmt.Errorf("Error configuring cbr0: %v", err)
+			glog.Error(err)
+		}
+		if err != nil {
+			kl.runtimeState.setNetworkState(err)
+			return
+		}
+	}
+
+	kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
+}
+
+// updatePodCIDR updates the pod CIDR in the runtime state if it is different
+// from the current CIDR.
+func (kl *Kubelet) updatePodCIDR(cidr string) {
+	if kl.runtimeState.podCIDR() == cidr {
+		return
+	}
+
+	glog.Infof("Setting Pod CIDR: %v -> %v", kl.runtimeState.podCIDR(), cidr)
+	kl.runtimeState.setPodCIDR(cidr)
+
+	if kl.networkPlugin != nil {
+		details := make(map[string]interface{})
+		details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = cidr
+		kl.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
+	}
+}
+
+// shapingEnabled returns whether traffic shaping is enabled.
+func (kl *Kubelet) shapingEnabled() bool {
+	// Disable shaping if a network plugin is defined and supports shaping
+	if kl.networkPlugin != nil && kl.networkPlugin.Capabilities().Has(network.NET_PLUGIN_CAPABILITY_SHAPING) {
+		return false
+	}
+	return true
+}
pkg/kubelet/kubelet_network_test.go (new file, 219 lines)
@@ -0,0 +1,219 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"net"
+	"reflect"
+	"strings"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/util/bandwidth"
+)
+
+func TestNodeIPParam(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	kubelet := testKubelet.kubelet
+	tests := []struct {
+		nodeIP   string
+		success  bool
+		testName string
+	}{
+		{
+			nodeIP:   "",
+			success:  true,
+			testName: "IP not set",
+		},
+		{
+			nodeIP:   "127.0.0.1",
+			success:  false,
+			testName: "loopback address",
+		},
+		{
+			nodeIP:   "FE80::0202:B3FF:FE1E:8329",
+			success:  false,
+			testName: "IPv6 address",
+		},
+		{
+			nodeIP:   "1.2.3.4",
+			success:  false,
+			testName: "IPv4 address that doesn't belong to host",
+		},
+	}
+	for _, test := range tests {
+		kubelet.nodeIP = net.ParseIP(test.nodeIP)
+		err := kubelet.validateNodeIP()
+		if err != nil && test.success {
+			t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
+		} else if err == nil && !test.success {
+			t.Errorf("Test: %s, expected an error", test.testName)
+		}
+	}
+}
+
+type countingDNSScrubber struct {
+	counter *int
+}
+
+func (cds countingDNSScrubber) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
+	(*cds.counter)++
+	return nameservers, searches
+}
+
+func TestParseResolvConf(t *testing.T) {
+	testCases := []struct {
+		data        string
+		nameservers []string
+		searches    []string
+	}{
+		{"", []string{}, []string{}},
+		{" ", []string{}, []string{}},
+		{"\n", []string{}, []string{}},
+		{"\t\n\t", []string{}, []string{}},
+		{"#comment\n", []string{}, []string{}},
+		{" #comment\n", []string{}, []string{}},
+		{"#comment\n#comment", []string{}, []string{}},
+		{"#comment\nnameserver", []string{}, []string{}},
+		{"#comment\nnameserver\nsearch", []string{}, []string{}},
+		{"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
+		{" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
+		{"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
+		{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}},
+		{"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}},
+		{"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}},
+		{"search foo", []string{}, []string{"foo"}},
+		{"search foo bar", []string{}, []string{"foo", "bar"}},
+		{"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}},
+		{"search foo\nsearch bar", []string{}, []string{"bar"}},
+		{"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}},
+		{"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}},
+		{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
+	}
+	for i, tc := range testCases {
+		ns, srch, err := parseResolvConf(strings.NewReader(tc.data), nil)
+		if err != nil {
+			t.Errorf("expected success, got %v", err)
+			continue
+		}
+		if !reflect.DeepEqual(ns, tc.nameservers) {
+			t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
+		}
+		if !reflect.DeepEqual(srch, tc.searches) {
+			t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
+		}
+
+		counter := 0
+		cds := countingDNSScrubber{&counter}
+		ns, srch, err = parseResolvConf(strings.NewReader(tc.data), cds)
+		if err != nil {
+			t.Errorf("expected success, got %v", err)
+			continue
+		}
+		if !reflect.DeepEqual(ns, tc.nameservers) {
+			t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
+		}
+		if !reflect.DeepEqual(srch, tc.searches) {
+			t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
+		}
+		if counter != 1 {
+			t.Errorf("[%d] expected dnsScrubber to have been called: got %d", i, counter)
+		}
+	}
+}
+
+func TestCleanupBandwidthLimits(t *testing.T) {
+	testPod := func(name, ingress string) *api.Pod {
+		pod := podWithUidNameNs("", name, "")
+
+		if len(ingress) != 0 {
+			pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
+		}
+
+		return pod
+	}
+
+	// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
+	// layer status getter function and test that function instead.
+	tests := []struct {
+		status           *api.PodStatus
+		pods             []*api.Pod
+		inputCIDRs       []string
+		expectResetCIDRs []string
+		name             string
+	}{
+		{
+			status: &api.PodStatus{
+				PodIP: "1.2.3.4",
+				Phase: api.PodRunning,
+			},
+			pods: []*api.Pod{
+				testPod("foo", "10M"),
+				testPod("bar", ""),
+			},
+			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
+			expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
+			name:             "pod running",
+		},
+		{
+			status: &api.PodStatus{
+				PodIP: "1.2.3.4",
+				Phase: api.PodFailed,
+			},
+			pods: []*api.Pod{
+				testPod("foo", "10M"),
+				testPod("bar", ""),
+			},
+			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
+			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
+			name:             "pod not running",
+		},
+		{
+			status: &api.PodStatus{
+				PodIP: "1.2.3.4",
+				Phase: api.PodFailed,
+			},
+			pods: []*api.Pod{
+				testPod("foo", ""),
+				testPod("bar", ""),
+			},
+			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
+			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
+			name:             "no bandwidth limits",
+		},
+	}
+	for _, test := range tests {
+		shaper := &bandwidth.FakeShaper{
+			CIDRs: test.inputCIDRs,
+		}
+
+		testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+		testKube.kubelet.shaper = shaper
+
+		for _, pod := range test.pods {
+			testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
+		}
+
+		err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
+		if err != nil {
+			t.Errorf("unexpected error: %v (%s)", test.name, err)
+		}
+		if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
+			t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
+		}
+	}
+}
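With the tests relocated, the network code can be exercised in isolation. Assuming a checkout of the tree at this revision with a working Go build environment, something like:

go test ./pkg/kubelet -run 'TestNodeIPParam|TestParseResolvConf|TestCleanupBandwidthLimits'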
pkg/kubelet/kubelet_test.go
@@ -28,7 +28,6 @@ import (
 	goruntime "runtime"
 	"sort"
 	"strconv"
-	"strings"
 	"testing"
 	"time"
 
@@ -1035,46 +1034,6 @@ func TestMakeVolumeMounts(t *testing.T) {
 	}
 }
-
-func TestNodeIPParam(t *testing.T) {
-	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-	kubelet := testKubelet.kubelet
-	tests := []struct {
-		nodeIP   string
-		success  bool
-		testName string
-	}{
-		{
-			nodeIP:   "",
-			success:  true,
-			testName: "IP not set",
-		},
-		{
-			nodeIP:   "127.0.0.1",
-			success:  false,
-			testName: "loopback address",
-		},
-		{
-			nodeIP:   "FE80::0202:B3FF:FE1E:8329",
-			success:  false,
-			testName: "IPv6 address",
-		},
-		{
-			nodeIP:   "1.2.3.4",
-			success:  false,
-			testName: "IPv4 address that doesn't belong to host",
-		},
-	}
-	for _, test := range tests {
-		kubelet.nodeIP = net.ParseIP(test.nodeIP)
-		err := kubelet.validateNodeIP()
-		if err != nil && test.success {
-			t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
-		} else if err == nil && !test.success {
-			t.Errorf("Test: %s, expected an error", test.testName)
-		}
-	}
-}
 
 type fakeContainerCommandRunner struct {
 	Cmd []string
 	ID  kubecontainer.ContainerID
@@ -1160,76 +1119,6 @@ func TestRunInContainer(t *testing.T) {
 	}
 }
-
-type countingDNSScrubber struct {
-	counter *int
-}
-
-func (cds countingDNSScrubber) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
-	(*cds.counter)++
-	return nameservers, searches
-}
-
-func TestParseResolvConf(t *testing.T) {
-	testCases := []struct {
-		data        string
-		nameservers []string
-		searches    []string
-	}{
-		{"", []string{}, []string{}},
-		{" ", []string{}, []string{}},
-		{"\n", []string{}, []string{}},
-		{"\t\n\t", []string{}, []string{}},
-		{"#comment\n", []string{}, []string{}},
-		{" #comment\n", []string{}, []string{}},
-		{"#comment\n#comment", []string{}, []string{}},
-		{"#comment\nnameserver", []string{}, []string{}},
-		{"#comment\nnameserver\nsearch", []string{}, []string{}},
-		{"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
-		{" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
-		{"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
-		{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}},
-		{"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}},
-		{"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}},
-		{"search foo", []string{}, []string{"foo"}},
-		{"search foo bar", []string{}, []string{"foo", "bar"}},
-		{"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}},
-		{"search foo\nsearch bar", []string{}, []string{"bar"}},
-		{"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}},
-		{"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}},
-		{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
-	}
-	for i, tc := range testCases {
-		ns, srch, err := parseResolvConf(strings.NewReader(tc.data), nil)
-		if err != nil {
-			t.Errorf("expected success, got %v", err)
-			continue
-		}
-		if !reflect.DeepEqual(ns, tc.nameservers) {
-			t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
-		}
-		if !reflect.DeepEqual(srch, tc.searches) {
-			t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
-		}
-
-		counter := 0
-		cds := countingDNSScrubber{&counter}
-		ns, srch, err = parseResolvConf(strings.NewReader(tc.data), cds)
-		if err != nil {
-			t.Errorf("expected success, got %v", err)
-			continue
-		}
-		if !reflect.DeepEqual(ns, tc.nameservers) {
-			t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
-		}
-		if !reflect.DeepEqual(srch, tc.searches) {
-			t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
-		}
-		if counter != 1 {
-			t.Errorf("[%d] expected dnsScrubber to have been called: got %d", i, counter)
-		}
-	}
-}
 
 func TestDNSConfigurationParams(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
@@ -4127,88 +4016,6 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
 	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
 }
-
-func TestCleanupBandwidthLimits(t *testing.T) {
-	testPod := func(name, ingress string) *api.Pod {
-		pod := podWithUidNameNs("", name, "")
-
-		if len(ingress) != 0 {
-			pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
-		}
-
-		return pod
-	}
-
-	// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
-	// layer status getter function and test that function instead.
-	tests := []struct {
-		status           *api.PodStatus
-		pods             []*api.Pod
-		inputCIDRs       []string
-		expectResetCIDRs []string
-		name             string
-	}{
-		{
-			status: &api.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: api.PodRunning,
-			},
-			pods: []*api.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod running",
-		},
-		{
-			status: &api.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: api.PodFailed,
-			},
-			pods: []*api.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod not running",
-		},
-		{
-			status: &api.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: api.PodFailed,
-			},
-			pods: []*api.Pod{
-				testPod("foo", ""),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "no bandwidth limits",
-		},
-	}
-	for _, test := range tests {
-		shaper := &bandwidth.FakeShaper{
-			CIDRs: test.inputCIDRs,
-		}
-
-		testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-		testKube.kubelet.shaper = shaper
-
-		for _, pod := range test.pods {
-			testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
-		}
-
-		err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
-		if err != nil {
-			t.Errorf("unexpected error: %v (%s)", test.name, err)
-		}
-		if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
-			t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
-		}
-	}
-}
 
 func TestExtractBandwidthResources(t *testing.T) {
 	four, _ := resource.ParseQuantity("4M")
 	ten, _ := resource.ParseQuantity("10M")