Remove the flannel experimental overlay

Lucas Käldström 2016-10-04 11:53:53 +03:00
parent f79a53a734
commit 348717c50a
9 changed files with 22 additions and 245 deletions

View File

@@ -207,8 +207,6 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
fs.MarkDeprecated("experimental-flannel-overlay", "Will be removed in a future version.")
fs.DurationVar(&s.OutOfDiskTransitionFrequency.Duration, "outofdisk-transition-frequency", s.OutOfDiskTransitionFrequency.Duration, "Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. Default: 5m0s")
fs.StringVar(&s.NodeIP, "node-ip", s.NodeIP, "IP address of the node. If set, kubelet will use this IP address for the node")
fs.BoolVar(&s.EnableCustomMetrics, "enable-custom-metrics", s.EnableCustomMetrics, "Support for gathering custom metrics.")
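
For context, spf13/pflag's MarkDeprecated (used above) hides a flag from help output and prints a warning whenever the flag is set; deleting the flag, as this commit does, is the final step of that deprecation path. A minimal standalone sketch of the behavior being retired, assuming the standard pflag API:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("kubelet", flag.ExitOnError)
	overlay := fs.Bool("experimental-flannel-overlay", false, "Experimental flannel overlay support.")
	// MarkDeprecated hides the flag from --help and emits a warning on use.
	fs.MarkDeprecated("experimental-flannel-overlay", "Will be removed in a future version.")
	// Setting the flag still works, but prints a deprecation warning such as:
	// "Flag --experimental-flannel-overlay has been deprecated, Will be removed in a future version."
	fs.Parse([]string{"--experimental-flannel-overlay=true"})
	fmt.Println("overlay enabled:", *overlay)
}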

View File

@@ -180,7 +180,6 @@ executor-suicide-timeout
exit-on-lock-contention
experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig
experimental-flannel-overlay
experimental-keystone-url
experimental-nvidia-gpus
experimental-prefix

View File

@@ -369,10 +369,6 @@ type KubeletConfiguration struct {
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is duration for which the kubelet has to
// wait before transitioning out of out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`

View File

@@ -424,10 +424,6 @@ type KubeletConfiguration struct {
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls *bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is duration for which the kubelet has to
// wait before transitioning out of out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency"`
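
For context, a minimal sketch of how this field surfaced in a serialized KubeletConfiguration before this commit (values illustrative, taken from the documented defaults in the flag help above):

{
  "serializeImagePulls": true,
  "experimentalFlannelOverlay": false,
  "outOfDiskTransitionFrequency": "5m0s"
}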

View File

@@ -1,168 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"github.com/golang/glog"
)
// TODO: Move all this to a network plugin.
const (
// TODO: The location of default docker options is distro specific, so this
// probably won't work on anything other than debian/ubuntu. This is a
// short-term compromise till we've moved overlay setup into a plugin.
dockerOptsFile = "/etc/default/docker"
flannelSubnetKey = "FLANNEL_SUBNET"
flannelNetworkKey = "FLANNEL_NETWORK"
flannelMtuKey = "FLANNEL_MTU"
dockerOptsKey = "DOCKER_OPTS"
flannelSubnetFile = "/var/run/flannel/subnet.env"
)
// A Kubelet to flannel bridging helper.
type FlannelHelper struct {
subnetFile string
iptablesHelper utiliptables.Interface
}
// NewFlannelHelper creates a new flannel helper.
func NewFlannelHelper() *FlannelHelper {
return &FlannelHelper{
subnetFile: flannelSubnetFile,
iptablesHelper: utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4),
}
}
// Ensure the required MASQUERADE rules exist for the given network/cidr.
func (f *FlannelHelper) ensureFlannelMasqRule(kubeNetwork, podCIDR string) error {
// TODO: Investigate delegation to flannel via -ip-masq=true once flannel
// issue #374 is resolved.
comment := "Flannel masquerade facilitates pod<->node traffic."
args := []string{
"-m", "comment", "--comment", comment,
"!", "-d", kubeNetwork, "-s", podCIDR, "-j", "MASQUERADE",
}
_, err := f.iptablesHelper.EnsureRule(
utiliptables.Append,
utiliptables.TableNAT,
utiliptables.ChainPostrouting,
args...)
return err
}
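// For reference, the EnsureRule call above appends a rule roughly
// equivalent to this iptables invocation (kubeNetwork and podCIDR are
// the function arguments):
//   iptables -t nat -A POSTROUTING -m comment \
//     --comment "Flannel masquerade facilitates pod<->node traffic." \
//     ! -d <kubeNetwork> -s <podCIDR> -j MASQUERADE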
// Handshake waits for the flannel subnet file and installs a few IPTables
// rules, returning the pod CIDR allocated for this node.
func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
// TODO: Using a file to communicate is brittle
if _, err = os.Stat(f.subnetFile); err != nil {
return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
}
glog.Infof("Found flannel subnet file %v", f.subnetFile)
config, err := parseKVConfig(f.subnetFile)
if err != nil {
return "", err
}
if err = writeDockerOptsFromFlannelConfig(config); err != nil {
return "", err
}
podCIDR, ok := config[flannelSubnetKey]
if !ok {
return "", fmt.Errorf("No flannel subnet, config %+v", config)
}
kubeNetwork, ok := config[flannelNetworkKey]
if !ok {
return "", fmt.Errorf("No flannel network, config %+v", config)
}
if err = f.ensureFlannelMasqRule(kubeNetwork, podCIDR); err != nil {
return "", fmt.Errorf("Unable to install flannel masquerade %v", err)
}
return podCIDR, nil
}
// Take env variables from the flannel subnet env file and write them to /etc/default/docker.
func writeDockerOptsFromFlannelConfig(flannelConfig map[string]string) error {
// TODO: Write dockeropts to unit file on systemd machines
// https://github.com/docker/docker/issues/9889
mtu, ok := flannelConfig[flannelMtuKey]
if !ok {
return fmt.Errorf("No flannel mtu, flannel config %+v", flannelConfig)
}
dockerOpts, err := parseKVConfig(dockerOptsFile)
if err != nil {
return err
}
opts, ok := dockerOpts[dockerOptsKey]
if !ok {
glog.Errorf("Did not find docker opts, writing them")
opts = " --bridge=cbr0 --iptables=false --ip-masq=false"
} else {
opts, _ = strconv.Unquote(opts)
}
dockerOpts[dockerOptsKey] = fmt.Sprintf("\"%v --mtu=%v\"", opts, mtu)
if err = writeKVConfig(dockerOptsFile, dockerOpts); err != nil {
return err
}
return nil
}
// parseKVConfig reads a file of key=value env variables and returns them as a map.
func parseKVConfig(filename string) (map[string]string, error) {
config := map[string]string{}
if _, err := os.Stat(filename); err != nil {
return config, err
}
buff, err := ioutil.ReadFile(filename)
if err != nil {
return config, err
}
str := string(buff)
glog.Infof("Read kv options %+v from %v", str, filename)
for _, line := range strings.Split(str, "\n") {
kv := strings.Split(line, "=")
if len(kv) != 2 {
glog.Warningf("Ignoring non key-value pair %v", kv)
continue
}
config[string(kv[0])] = string(kv[1])
}
return config, nil
}
// writeKVConfig writes a kv map as env variables into the given file.
func writeKVConfig(filename string, kv map[string]string) error {
if _, err := os.Stat(filename); err != nil {
return err
}
content := ""
for k, v := range kv {
content += fmt.Sprintf("%v=%v\n", k, v)
}
glog.Warningf("Writing kv options %+v to %v", content, filename)
return ioutil.WriteFile(filename, []byte(content), 0644)
}
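
For context, the Handshake above is driven entirely by the flanneld-written key-value file at /var/run/flannel/subnet.env. Illustrative contents (addresses and MTU are made-up example values, not taken from this commit):

FLANNEL_NETWORK=10.1.0.0/16
FLANNEL_SUBNET=10.1.42.1/24
FLANNEL_MTU=1450

Given input like that, FLANNEL_SUBNET becomes the node's pod CIDR, FLANNEL_NETWORK feeds the masquerade rule, and writeDockerOptsFromFlannelConfig rewrites DOCKER_OPTS in /etc/default/docker to something like "--bridge=cbr0 --iptables=false --ip-masq=false --mtu=1450" so the docker bridge matches the flannel-allocated subnet.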

View File

@@ -392,10 +392,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// TODO(mtaufen): remove when internal cbr0 implementation gets removed in favor
// of the kubenet network plugin
var myConfigureCBR0 bool = kubeCfg.ConfigureCBR0
var myFlannelExperimentalOverlay bool = kubeCfg.ExperimentalFlannelOverlay
if kubeCfg.NetworkPluginName == "kubenet" {
myConfigureCBR0 = false
myFlannelExperimentalOverlay = false
}
klet := &Kubelet{
@@ -426,27 +424,25 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
nodeRef: nodeRef,
nodeLabels: kubeCfg.NodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
configureCBR0: myConfigureCBR0,
nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
reconcileCIDR: kubeCfg.ReconcileCIDR,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
nvidiaGPUs: int(kubeCfg.NvidiaGPUs),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
cpuCFSQuota: kubeCfg.CPUCFSQuota,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
flannelExperimentalOverlay: myFlannelExperimentalOverlay,
flannelHelper: nil,
nodeIP: net.ParseIP(kubeCfg.NodeIP),
clock: clock.RealClock{},
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
configureCBR0: myConfigureCBR0,
nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
reconcileCIDR: kubeCfg.ReconcileCIDR,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
nvidiaGPUs: int(kubeCfg.NvidiaGPUs),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
cpuCFSQuota: kubeCfg.CPUCFSQuota,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
nodeIP: net.ParseIP(kubeCfg.NodeIP),
clock: clock.RealClock{},
outOfDiskTransitionFrequency: kubeCfg.OutOfDiskTransitionFrequency.Duration,
reservation: *reservation,
enableCustomMetrics: kubeCfg.EnableCustomMetrics,
@@ -458,11 +454,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
iptablesDropBit: int(kubeCfg.IPTablesDropBit),
}
if klet.flannelExperimentalOverlay {
klet.flannelHelper = NewFlannelHelper()
glog.Infof("Flannel is in charge of podCIDR and overlay networking.")
}
if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, kubeCfg.ConfigureCBR0, kubeCfg.NetworkPluginName); err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
@@ -965,15 +956,6 @@ type Kubelet struct {
// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
oneTimeInitializer sync.Once
// flannelExperimentalOverlay determines whether the experimental flannel
// network overlay is active.
flannelExperimentalOverlay bool
// TODO: Flannelhelper doesn't store any state, we can instantiate it
// on the fly if we're confident the dbus connections it opens don't
// put the system under duress.
flannelHelper *FlannelHelper
// If non-nil, use this IP address for the node
nodeIP net.IP

View File

@@ -88,7 +88,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
// is used or whether we are using overlay networking. We should return
// true for cloud providers if they implement Routes() interface and
// we are not using overlay networking.
if kl.cloud == nil || kl.cloud.ProviderName() != "gce" || kl.flannelExperimentalOverlay {
if kl.cloud == nil || kl.cloud.ProviderName() != "gce" {
return false
}
_, supported := kl.cloud.Routes()
@@ -224,22 +224,13 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
// syncNetworkStatus updates the network state, ensuring that the network is
// configured correctly if the kubelet is set to configure cbr0:
// * handshake flannel helper if the flannel experimental overlay is being used.
// * ensure that iptables masq rules are setup
// * reconcile cbr0 with the pod CIDR
func (kl *Kubelet) syncNetworkStatus() {
var err error
if kl.configureCBR0 {
if kl.flannelExperimentalOverlay {
podCIDR, err := kl.flannelHelper.Handshake()
if err != nil {
glog.Infof("Flannel server handshake failed %v", err)
return
}
kl.updatePodCIDR(podCIDR)
}
if err := ensureIPTablesMasqRule(kl.iptClient, kl.nonMasqueradeCIDR); err != nil {
err = fmt.Errorf("Error on adding ip table rules: %v", err)
err = fmt.Errorf("Error on adding iptables rules: %v", err)
glog.Error(err)
kl.runtimeState.setNetworkState(err)
return

View File

@@ -317,22 +317,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
return fmt.Errorf("no node instance returned for %q", kl.nodeName)
}
// Flannel is the authoritative source of pod CIDR, if it's running.
// This is a short term compromise till we get flannel working in
// reservation mode.
if kl.flannelExperimentalOverlay {
flannelPodCIDR := kl.runtimeState.podCIDR()
if node.Spec.PodCIDR != flannelPodCIDR {
node.Spec.PodCIDR = flannelPodCIDR
glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR)
if updatedNode, err := kl.kubeClient.Core().Nodes().Update(node); err != nil {
glog.Warningf("Failed to update podCIDR: %v", err)
} else {
// Update the node resourceVersion so the status update doesn't fail.
node = updatedNode
}
}
} else if kl.reconcileCIDR {
if kl.reconcileCIDR {
kl.updatePodCIDR(node.Spec.PodCIDR)
}

View File

@@ -29,8 +29,6 @@ const (
// ControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// Port for flannel daemon.
FlannelDaemonPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet