Turn flannel off by default

commit 9aa0efa393 (parent ad2d3d4c20) — mirror of https://github.com/k3s-io/kubernetes.git
@@ -113,7 +113,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
 KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
 
 # OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel
 OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
 OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
 OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
@@ -125,12 +125,10 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
 TEST_CLUSTER="${TEST_CLUSTER:-true}"
 
 # OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail,flannel
 OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
 OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
 OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
 
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
 E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
-# Overlay network settings
-OVERLAY_NETWORK=${OVERLAY_NETWORK:-true}
@@ -2,17 +2,17 @@
   "kind": "Pod",
   "apiVersion": "v1",
   "metadata": {
-    "name": "flannel-helper",
+    "name": "flannel-server",
     "namespace": "kube-system",
     "labels": {
-      "app": "flannel-helper",
+      "app": "flannel-server",
       "version": "v0.1"
     }
   },
   "spec": {
     "volumes": [
       {
-        "name": "varlogflannel",
+        "name": "varlog",
         "hostPath": {
           "path": "/var/log"
         }
@@ -30,8 +30,8 @@
     ],
     "containers": [
       {
-        "name": "flannel-helper",
-        "image": "bprashanth/flannel-helper:0.1",
+        "name": "flannel-server-helper",
+        "image": "gcr.io/google_containers/flannel-server-helper:0.1",
         "args": [
           "--network-config=/etc/kubernetes/network.json",
           "--etcd-prefix=/kubernetes.io/network",
@@ -66,7 +66,7 @@
         },
         "volumeMounts": [
           {
-            "name": "varlogflannel",
+            "name": "varlog",
             "mountPath": "/var/log"
           }
         ]
@@ -1,6 +1,6 @@
 {
-  "Network": "192.168.0.0/16",
-  "SubnetLen": 26,
+  "Network": "172.16.0.0/12",
+  "SubnetLen": 24,
   "Backend": {
     "Type": "vxlan",
     "VNI": 1
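In this flannel config, `Network` is the overlay-wide CIDR flannel carves up and `SubnetLen` is the per-node slice. A quick self-contained Go sketch (not part of this commit) of what the old and new defaults yield:

```go
package main

import "fmt"

// subnetMath reports how many per-node subnets a flannel Network/SubnetLen
// pair yields, and how many IPv4 addresses each node subnet spans.
func subnetMath(networkBits, subnetLen uint) (subnets, addrsPerSubnet int) {
	return 1 << (subnetLen - networkBits), 1 << (32 - subnetLen)
}

func main() {
	// Old defaults: 192.168.0.0/16 carved into /26 node subnets.
	s, a := subnetMath(16, 26)
	fmt.Printf("old: %d node subnets, %d addresses each\n", s, a) // 1024, 64

	// New defaults: 172.16.0.0/12 carved into /24 node subnets.
	s, a = subnetMath(12, 24)
	fmt.Printf("new: %d node subnets, %d addresses each\n", s, a) // 4096, 256
}
```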
@@ -1,3 +1,5 @@
+# TODO: Run flannel daemon in a static pod once we've moved the overlay network
+# setup into a network plugin.
 flannel-tar:
   archive:
     - extracted
@@ -10,7 +10,11 @@
 {% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
 {% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
 {% endif -%}
-{% if pillar['allocate_node_cidrs'] is defined -%}
+# When we're using flannel it is responsible for cidr allocation.
+# This is expected to be a short-term compromise.
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+{% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
+{% elif pillar['allocate_node_cidrs'] is defined -%}
 {% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
 {% endif -%}
 {% if pillar['terminated_pod_gc_threshold'] is defined -%}
@@ -39,7 +43,7 @@
 {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
 {% endif -%}
 
-{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " --allocate-node-cidrs=false" + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
+{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
 
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
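The template above makes the flannel check take precedence over the `allocate_node_cidrs` pillar when the controller-manager flags are composed. A hedged Go sketch of that precedence (illustrative names, not Salt code):

```go
package main

import (
	"fmt"
	"strings"
)

// allocateNodeCIDRsFlag mirrors the template's precedence: the flannel
// network provider forces the flag off, an explicit pillar value comes
// next, and otherwise the flag is omitted entirely.
func allocateNodeCIDRsFlag(networkProvider, pillarValue string) string {
	if strings.ToLower(networkProvider) == "flannel" {
		return "--allocate-node-cidrs=false" // flannel owns CIDR allocation
	}
	if pillarValue != "" {
		return "--allocate-node-cidrs=" + pillarValue
	}
	return ""
}

func main() {
	fmt.Println(allocateNodeCIDRsFlag("flannel", "true")) // --allocate-node-cidrs=false
	fmt.Println(allocateNodeCIDRsFlag("", "true"))        // --allocate-node-cidrs=true
}
```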
@@ -85,6 +85,11 @@
 {% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
 {% endif -%}
 
+{% set experimental_flannel_overlay = "" -%}
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+{% set experimental_flannel_overlay = "--experimental-flannel-overlay=true" %}
+{% endif -%}
+
 # Run containers under the root cgroup and create a system container.
 {% set system_container = "" -%}
 {% set cgroup_root = "" -%}
@@ -117,4 +122,4 @@
 {% endif -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{experimental_flannel_overlay}} {{test_args}}"
@@ -13,7 +13,9 @@ base:
   'roles:kubernetes-pool':
     - match: grain
     - docker
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
     - flannel
+{% endif %}
     - helpers
     - cadvisor
     - kube-client-tools
@@ -41,8 +43,10 @@ base:
     - match: grain
     - generate-cert
     - etcd
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
     - flannel-server
     - flannel
+{% endif %}
     - kube-apiserver
     - kube-controller-manager
     - kube-scheduler
@@ -292,8 +292,7 @@ func (s *CMServer) Run(_ []string) error {
 	}
 
 	if s.AllocateNodeCIDRs {
-		// TODO: Pipe this as a command line flag that corresponds to overlay==true
-		if cloud == nil || true {
+		if cloud == nil {
 			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
 		} else if routes, ok := cloud.Routes(); !ok {
 			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
@@ -301,6 +300,8 @@ func (s *CMServer) Run(_ []string) error {
 			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
 			routeController.Run(s.NodeSyncPeriod)
 		}
+	} else {
+		glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
 	}
 
 	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
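With the `|| true` removed, the route controller once again runs only when CIDR allocation is enabled and the cloud provider actually supports routes; when `AllocateNodeCIDRs` is false (the flannel case), the new `else` branch just logs. A minimal Go sketch of the restored gate (the function name is illustrative, not controller-manager API):

```go
package main

import "fmt"

// shouldManageRoutes captures the gate this hunk restores: routes are
// managed only when CIDR allocation is on and a cloud provider that
// supports routes is configured.
func shouldManageRoutes(allocateNodeCIDRs, haveCloud, cloudHasRoutes bool) bool {
	return allocateNodeCIDRs && haveCloud && cloudHasRoutes
}

func main() {
	fmt.Println(shouldManageRoutes(true, true, true))   // true: route controller runs
	fmt.Println(shouldManageRoutes(true, false, false)) // false: no cloud provider, warn only
	fmt.Println(shouldManageRoutes(false, true, true))  // false: flannel owns CIDRs, log only
}
```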
@@ -68,9 +68,8 @@ import (
 )
 
 const (
 	defaultRootDir = "/var/lib/kubelet"
-	networkConfig = "/var/run/flannel/network.json"
-	useDefaultOverlay = true
+	experimentalFlannelOverlay = false
 )
 
 // KubeletServer encapsulates all of the parameters necessary for starting up
@@ -158,8 +157,8 @@ type KubeletServer struct {
 	KubeAPIBurst int
 
 	// Pull images one at a time.
 	SerializeImagePulls bool
-	UseDefaultOverlay bool
+	ExperimentalFlannelOverlay bool
 }
 
 // bootstrapping interface for kubelet, targets the initialization protocol
@@ -232,8 +231,7 @@ func NewKubeletServer() *KubeletServer {
 		ReconcileCIDR: true,
 		KubeAPIQPS: 5.0,
 		KubeAPIBurst: 10,
-		// Flannel parameters
-		UseDefaultOverlay: useDefaultOverlay,
+		ExperimentalFlannelOverlay: experimentalFlannelOverlay,
 	}
 }
 
@@ -348,9 +346,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
 	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
-	// Flannel config parameters
-	fs.BoolVar(&s.UseDefaultOverlay, "use-default-overlay", s.UseDefaultOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
+	fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
 }
 
 // UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
@@ -489,7 +485,7 @@ func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
 		Writer: writer,
 		VolumePlugins: ProbeVolumePlugins(),
 
-		UseDefaultOverlay: s.UseDefaultOverlay,
+		ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
 	}, nil
 }
 
@@ -962,7 +958,7 @@ type KubeletConfig struct {
 	Writer io.Writer
 	VolumePlugins []volume.VolumePlugin
 
-	UseDefaultOverlay bool
+	ExperimentalFlannelOverlay bool
 }
 
 func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -1045,8 +1041,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 		kc.OOMAdjuster,
 		kc.SerializeImagePulls,
 		kc.ContainerManager,
-		// Flannel parameters
-		kc.UseDefaultOverlay,
+		kc.ExperimentalFlannelOverlay,
 	)
 
 	if err != nil {
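The flag rename above follows the usual spf13/pflag pattern: a `BoolVar` bound to a field on the server struct, defaulting to off. A self-contained sketch of the same pattern (a standalone program, not kubelet code):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Off by default, matching this commit's intent.
	experimentalFlannelOverlay := false

	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ExitOnError)
	fs.BoolVar(&experimentalFlannelOverlay, "experimental-flannel-overlay",
		experimentalFlannelOverlay,
		"Experimental support for starting the kubelet with the default overlay network (flannel).")

	// Simulate a command line that opts in.
	fs.Parse([]string{"--experimental-flannel-overlay=true"})
	fmt.Println(experimentalFlannelOverlay) // true
}
```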
@@ -85,6 +85,7 @@ kubelet
       --enable-server[=true]: Enable the Kubelet's server
       --event-burst=10: Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0
       --event-qps=5: If > 0, limit event creations per second to this value. If 0, unlimited.
+      --experimental-flannel-overlay[=false]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
       --file-check-frequency=20s: Duration between checking config files for new data
       --google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
       --healthz-bind-address=127.0.0.1: The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
@@ -137,10 +138,9 @@ kubelet
       --system-container="": Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: "").
       --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert-dir.
       --tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
-      --use-default-overlay[=true]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
 ```
 
-###### Auto generated by spf13/cobra on 23-Nov-2015
+###### Auto generated by spf13/cobra on 24-Nov-2015
 
 
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@@ -327,4 +327,4 @@ watch-only
 whitelist-override-label
 windows-line-endings
 www-prefix
-use-default-overlay
+experimental-flannel-overlay
@@ -32,6 +32,9 @@ import (
 
 // TODO: Move all this to a network plugin.
 const (
+	// TODO: The location of default docker options is distro specific, so this
+	// probably won't work on anything other than debian/ubuntu. This is a
+	// short-term compromise till we've moved overlay setup into a plugin.
 	dockerOptsFile = "/etc/default/docker"
 	flannelSubnetKey = "FLANNEL_SUBNET"
 	flannelNetworkKey = "FLANNEL_NETWORK"
@@ -78,7 +81,7 @@ func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
 	if _, err = os.Stat(f.subnetFile); err != nil {
 		return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
 	}
-	glog.Infof("(kubelet)Found flannel subnet file %v", f.subnetFile)
+	glog.Infof("Found flannel subnet file %v", f.subnetFile)
 
 	config, err := parseKVConfig(f.subnetFile)
 	if err != nil {
@@ -115,7 +118,7 @@ func writeDockerOptsFromFlannelConfig(flannelConfig map[string]string) error {
 	}
 	opts, ok := dockerOpts[dockerOptsKey]
 	if !ok {
-		glog.Errorf("(kubelet)Did not find docker opts, writing them")
+		glog.Errorf("Did not find docker opts, writing them")
 		opts = fmt.Sprintf(
 			" --bridge=cbr0 --iptables=false --ip-masq=false")
 	} else {
@@ -139,7 +142,7 @@ func parseKVConfig(filename string) (map[string]string, error) {
 		return config, err
 	}
 	str := string(buff)
-	glog.Infof("(kubelet) Read kv options %+v from %v", str, filename)
+	glog.Infof("Read kv options %+v from %v", str, filename)
 	for _, line := range strings.Split(str, "\n") {
 		kv := strings.Split(line, "=")
 		if len(kv) != 2 {
@@ -160,6 +163,6 @@ func writeKVConfig(filename string, kv map[string]string) error {
 	for k, v := range kv {
 		content += fmt.Sprintf("%v=%v\n", k, v)
 	}
-	glog.Warningf("(kubelet)Writing kv options %+v to %v", content, filename)
+	glog.Warningf("Writing kv options %+v to %v", content, filename)
 	return ioutil.WriteFile(filename, []byte(content), 0644)
 }
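`parseKVConfig` above treats flanneld's subnet file as a plain KEY=VALUE list. A standalone Go sketch of that parsing against a sample file (the contents are illustrative of what flanneld typically writes):

```go
package main

import (
	"fmt"
	"strings"
)

// parseKV splits a KEY=VALUE-per-line config, skipping lines that don't
// contain exactly one '=' — the same shape as parseKVConfig above.
func parseKV(s string) map[string]string {
	config := map[string]string{}
	for _, line := range strings.Split(s, "\n") {
		kv := strings.Split(line, "=")
		if len(kv) != 2 {
			continue
		}
		config[kv[0]] = kv[1]
	}
	return config
}

func main() {
	// Illustrative subnet file contents, e.g. /var/run/flannel/subnet.env.
	sample := "FLANNEL_NETWORK=172.16.0.0/12\nFLANNEL_SUBNET=172.16.29.1/24\nFLANNEL_MTU=1450\n"
	kv := parseKV(sample)
	fmt.Println(kv["FLANNEL_SUBNET"]) // 172.16.29.1/24
}
```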
@@ -217,7 +217,7 @@ func NewMainKubelet(
 	oomAdjuster *oom.OOMAdjuster,
 	serializeImagePulls bool,
 	containerManager cm.ContainerManager,
-	useDefaultOverlay bool,
+	flannelExperimentalOverlay bool,
 ) (*Kubelet, error) {
 
 	if rootDirectory == "" {
@@ -328,12 +328,19 @@ func NewMainKubelet(
 		cpuCFSQuota: cpuCFSQuota,
 		daemonEndpoints: daemonEndpoints,
 		containerManager: containerManager,
+		flannelExperimentalOverlay: flannelExperimentalOverlay,
 		flannelHelper: NewFlannelHelper(),
-		useDefaultOverlay: useDefaultOverlay,
 	}
+	if klet.flannelExperimentalOverlay {
+		glog.Infof("Flannel is in charge of podCIDR and overlay networking.")
+	}
 	if klet.kubeClient == nil {
-		glog.Infof("Master not setting up flannel overlay")
-		klet.useDefaultOverlay = false
+		// The master kubelet cannot wait for the flannel daemon because it is responsible
+		// for starting up the flannel server in a static pod. So even though the flannel
+		// daemon runs on the master, it doesn't hold up cluster bootstrap. All the pods
+		// on the master run with host networking, so the master flannel doesn't care
+		// even if the network changes. We only need it for the master proxy.
+		klet.flannelExperimentalOverlay = false
 	}
 	if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
 		return nil, err
@@ -656,8 +663,12 @@ type Kubelet struct {
 	// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
 	oneTimeInitializer sync.Once
 
-	useDefaultOverlay bool
-	flannelHelper *FlannelHelper
+	flannelExperimentalOverlay bool
+	// TODO: Flannelhelper doesn't store any state, we can instantiate it
+	// on the fly if we're confident the dbus connetions it opens doesn't
+	// put the system under duress.
+	flannelHelper *FlannelHelper
 }
 
 func (kl *Kubelet) allSourcesReady() bool {
@@ -2628,7 +2639,7 @@ var oldNodeUnschedulable bool
 func (kl *Kubelet) syncNetworkStatus() {
 	var err error
 	if kl.configureCBR0 {
-		if kl.useDefaultOverlay {
+		if kl.flannelExperimentalOverlay {
 			podCIDR, err := kl.flannelHelper.Handshake()
 			if err != nil {
 				glog.Infof("Flannel server handshake failed %v", err)
@@ -2903,13 +2914,24 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 	if node == nil {
 		return fmt.Errorf("no node instance returned for %q", kl.nodeName)
 	}
-	// TODO: Actually update the node spec with pod cidr, this is currently a no-op.
-	if kl.useDefaultOverlay {
-		node.Spec.PodCIDR = kl.runtimeState.podCIDR()
+	// Flannel is the authoritative source of pod CIDR, if it's running.
+	// This is a short term compromise till we get flannel working in
+	// reservation mode.
+	if kl.flannelExperimentalOverlay {
+		flannelPodCIDR := kl.runtimeState.podCIDR()
+		if node.Spec.PodCIDR != flannelPodCIDR {
+			node.Spec.PodCIDR = flannelPodCIDR
+			glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR)
+			if updatedNode, err := kl.kubeClient.Nodes().Update(node); err != nil {
+				glog.Warningf("Failed to update podCIDR: %v", err)
+			} else {
+				// Update the node resourceVersion so the status update doesn't fail.
+				node = updatedNode
+			}
+		}
 	} else if kl.reconcileCIDR {
 		kl.runtimeState.setPodCIDR(node.Spec.PodCIDR)
 	}
-	glog.Infof("Updating node in apiserver with cidr %v", node.Spec.PodCIDR)
 
 	if err := kl.setNodeStatus(node); err != nil {
 		return err
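The net effect of the `tryUpdateNodeStatus` hunk is a reversal of authority for the pod CIDR: with the experimental flannel overlay the kubelet pushes its locally learned CIDR up to the node object, otherwise it keeps adopting the apiserver's assignment. A hedged Go sketch of the two directions (illustrative names, not the kubelet API):

```go
package main

import "fmt"

// reconcilePodCIDR sketches the direction of CIDR flow after this commit:
// with the flannel overlay the kubelet pushes the CIDR learned from
// flanneld up to the node object; otherwise it adopts the apiserver's value.
func reconcilePodCIDR(flannelOverlay bool, flannelCIDR, apiserverCIDR string) (nodeSpecCIDR, runtimeCIDR string) {
	if flannelOverlay {
		return flannelCIDR, flannelCIDR // flannel is authoritative
	}
	return apiserverCIDR, apiserverCIDR // apiserver is authoritative
}

func main() {
	fmt.Println(reconcilePodCIDR(true, "172.16.29.0/24", "")) // node object gets flannel's CIDR
	fmt.Println(reconcilePodCIDR(false, "", "10.244.1.0/24")) // kubelet adopts the apiserver's CIDR
}
```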