Remove the old flannel network mode in kube-up

Lucas Käldström 2016-10-04 12:02:25 +03:00
parent 348717c50a
commit 950e23740b
11 changed files with 5 additions and 360 deletions

cluster/gce/config-default.sh

@@ -148,7 +148,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
# Networking plugin specific settings.
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
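
With flannel removed, NETWORK_PROVIDER accepts only the three values left in the comment above. A minimal guard in the same shell style, in case a deployment script wants to fail fast (a sketch; kube-up does not necessarily validate the value like this):

case "${NETWORK_PROVIDER:-kubenet}" in
  none|opencontrail|kubenet) ;;  # providers still supported after this commit
  *) echo "Unsupported NETWORK_PROVIDER: ${NETWORK_PROVIDER}" >&2; exit 1 ;;
esac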

cluster/gce/config-test.sh

@@ -178,7 +178,7 @@ TEST_CLUSTER="${TEST_CLUSTER:-true}"
STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
# OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

cluster/saltbase/salt/flannel-server/flannel-server.manifest

@@ -1,112 +0,0 @@
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "flannel-server",
    "namespace": "kube-system",
    "labels": {
      "app": "flannel-server",
      "version": "v0.1"
    }
  },
  "spec": {
    "volumes": [
      {
        "name": "varlog",
        "hostPath": {
          "path": "/var/log"
        }
      },
      {
        "name": "etcdstorage",
        "hostPath": {
          "path": "/var/etcd-flannel"
        }
      },
      {
        "name": "networkconfig",
        "hostPath": {
          "path": "/etc/kubernetes/network.json"
        }
      }
    ],
    "containers": [
      {
        "name": "flannel-server-helper",
        "image": "gcr.io/google_containers/flannel-server-helper:0.1",
        "args": [
          "--network-config=/etc/kubernetes/network.json",
          "--etcd-prefix=/kubernetes.io/network",
          "--etcd-server=http://127.0.0.1:{{ etcd_port }}"
        ],
        "volumeMounts": [
          {
            "name": "networkconfig",
            "mountPath": "/etc/kubernetes/network.json"
          }
        ],
        "imagePullPolicy": "Always"
      },
      {
        "name": "flannel-container",
        "image": "quay.io/coreos/flannel:0.5.5",
        "command": [
          "/bin/sh",
          "-c",
          "/opt/bin/flanneld -listen 0.0.0.0:10253 -etcd-endpoints http://127.0.0.1:{{ etcd_port }} -etcd-prefix /kubernetes.io/network 1>>/var/log/flannel_server.log 2>&1"
        ],
        "ports": [
          {
            "hostPort": 10253,
            "containerPort": 10253
          }
        ],
        "resources": {
          "requests": {
            "cpu": {{ cpulimit }}
          }
        },
        "volumeMounts": [
          {
            "name": "varlog",
            "mountPath": "/var/log"
          }
        ]
      },
      {
        "name": "etcd-container",
        "image": "gcr.io/google_containers/etcd:2.2.1",
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:{{ etcd_peer_port }} --advertise-client-urls http://127.0.0.1:{{ etcd_port }} --listen-client-urls http://127.0.0.1:{{ etcd_port }} --data-dir /var/etcd-flannel/data 1>>/var/log/etcd_flannel.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
            "host": "127.0.0.1",
            "port": {{ etcd_port }},
            "path": "/health"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        },
        "resources": {
          "requests": {
            "cpu": {{ cpulimit }}
          }
        },
        "volumeMounts": [
          {
            "name": "varlog",
            "mountPath": "/var/log"
          },
          {
            "name": "etcdstorage",
            "mountPath": "/var/etcd-flannel"
          }
        ]
      }
    ],
    "hostNetwork": true
  }
}
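
The manifest above defined a static pod: the kubelet loaded it straight from its manifest directory, and all three containers (the config-seeding helper, the flanneld server, and its private etcd) shared the host network. A hedged spot-check from the era, assuming the install path used by the Salt state later in this commit:

ls /etc/kubernetes/manifests/flannel-server.manifest
# Static pods also surface as read-only mirror pods (named with a node suffix):
kubectl --namespace=kube-system get pods | grep flannel-server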

cluster/saltbase/salt/flannel-server/init.sls

@@ -1,39 +0,0 @@
touch /var/log/flannel.log:
  cmd.run:
    - creates: /var/log/flannel.log

touch /var/log/etcd_flannel.log:
  cmd.run:
    - creates: /var/log/etcd_flannel.log

/var/etcd-flannel:
  file.directory:
    - user: root
    - group: root
    - dir_mode: 700
    - recurse:
      - user
      - group
      - mode

/etc/kubernetes/network.json:
  file.managed:
    - source: salt://flannel-server/network.json
    - makedirs: True
    - user: root
    - group: root
    - mode: 755

/etc/kubernetes/manifests/flannel-server.manifest:
  file.managed:
    - source: salt://flannel-server/flannel-server.manifest
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - template: jinja
    - context:
        etcd_port: 4003
        etcd_peer_port: 2382
        cpulimit: '"100m"'
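
The context block pins the flannel server's private etcd to port 4003, so the network config seeded by flannel-server-helper could be read back over etcd's v2 keys API. A sketch of that check on a master of the time (assumes the static pod above was running):

curl -s http://127.0.0.1:4003/v2/keys/kubernetes.io/network/config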

cluster/saltbase/salt/flannel-server/network.json

@@ -1,8 +0,0 @@
{
  "Network": "172.16.0.0/12",
  "SubnetLen": 24,
  "Backend": {
    "Type": "vxlan",
    "VNI": 1
  }
}
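
For reference on what this config allocated: a /12 network split into /24 node subnets yields 2^(24-12) = 4096 subnets, each with 254 usable pod addresses. The arithmetic, as a one-line shell check:

echo $(( 2 ** (24 - 12) ))  # 4096 node subnets inside 172.16.0.0/12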

cluster/saltbase/salt/flannel/default

@@ -1,6 +0,0 @@
{% if grains.api_servers is defined -%}
{% set daemon_args = "-remote " + grains.api_servers + ":10253" -%}
{% else -%}
{% set daemon_args = "-remote 127.0.0.1:10253" -%}
{% endif -%}
DAEMON_ARGS="{{daemon_args}}"
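
This template pointed each node's flanneld at the flannel server on the master. A sketch of the rendered output for a node whose api_servers grain is 10.240.0.2 (a hypothetical address):

DAEMON_ARGS="-remote 10.240.0.2:10253"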

cluster/saltbase/salt/flannel/init.sls

@@ -1,44 +0,0 @@
# TODO: Run flannel daemon in a static pod once we've moved the overlay network
# setup into a network plugin.
flannel-tar:
  archive:
    - extracted
    - user: root
    - name: /usr/local/src
    - makedirs: True
    - source: https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
    - tar_options: v
    - source_hash: md5=972c717254775bef528f040af804f2cc
    - archive_format: tar
    - if_missing: /usr/local/src/flannel/flannel-0.5.5/

flannel-symlink:
  file.symlink:
    - name: /usr/local/bin/flanneld
    - target: /usr/local/src/flannel-0.5.5/flanneld
    - force: true
    - watch:
      - archive: flannel-tar

/etc/default/flannel:
  file.managed:
    - source: salt://flannel/default
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/etc/init.d/flannel:
  file.managed:
    - source: salt://flannel/initd
    - user: root
    - group: root
    - mode: 755

flannel:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/flanneld
      - file: /etc/init.d/flannel
      - file: /etc/default/flannel
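
Expressed as plain shell, the removed flannel-tar and flannel-symlink states amounted to roughly the following (a hedged sketch; the URL and MD5 are copied verbatim from the state above):

curl -fsSL -o /tmp/flannel.tar.gz \
  https://storage.googleapis.com/kubernetes-release/flannel/flannel-0.5.5-linux-amd64.tar.gz
echo "972c717254775bef528f040af804f2cc  /tmp/flannel.tar.gz" | md5sum -c -
tar -xzf /tmp/flannel.tar.gz -C /usr/local/src
ln -sf /usr/local/src/flannel-0.5.5/flanneld /usr/local/bin/flanneld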

cluster/saltbase/salt/flannel/initd

@@ -1,126 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: flanneld
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Flannel daemon
# Description:
#   Flannel daemon.
### END INIT INFO

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Flannel overlay network daemon"
NAME=flannel
DAEMON=/usr/local/bin/flanneld
DAEMON_ARGS="--ip-masq"
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
  # Avoid a potential race at boot time when both monit and init.d start
  # the same service
  PIDS=$(pidof $DAEMON)
  for PID in ${PIDS}; do
    kill -9 $PID
  done

  # Return
  #   0 if daemon has been started
  #   1 if daemon was already running
  #   2 if daemon could not be started
  start-stop-daemon --start --quiet --background --no-close \
    --make-pidfile --pidfile $PIDFILE \
    --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
    || return 1
  start-stop-daemon --start --quiet --background --no-close \
    --make-pidfile --pidfile $PIDFILE \
    --exec $DAEMON -c $DAEMON_USER -- \
    $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
    || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
  # Return
  #   0 if daemon has been stopped
  #   1 if daemon was already stopped
  #   2 if daemon could not be stopped
  #   other if a failure occurred
  start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
  RETVAL="$?"
  [ "$RETVAL" = 2 ] && return 2
  # Many daemons don't delete their pidfiles when they exit.
  rm -f $PIDFILE
  return "$RETVAL"
}

case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
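
Operationally, the script above was driven through the usual LSB entry points:

sudo service flannel start
sudo service flannel status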

cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest

@@ -14,11 +14,7 @@
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
-# When we're using flannel it is responsible for cidr allocation.
-# This is expected to be a short-term compromise.
-{% if pillar.get('network_provider', '').lower() == 'flannel' %}
-{% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
-{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
+{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set allocate_node_cidrs = "--allocate-node-cidrs=true" -%}
{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
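
With the flannel branch gone (it forced --allocate-node-cidrs=false), kubenet now always turns node CIDR allocation on. The surviving template logic, mirrored as a shell sketch for readability (the variable names here are illustrative, not part of the template):

provider="$(echo "${NETWORK_PROVIDER:-}" | tr '[:upper:]' '[:lower:]')"
if [ "${provider}" = "kubenet" ]; then
  allocate_node_cidrs="--allocate-node-cidrs=true"
elif [ -n "${ALLOCATE_NODE_CIDRS:-}" ]; then
  allocate_node_cidrs="--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi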

cluster/saltbase/salt/kubelet/default

@@ -97,16 +97,6 @@
{% set non_masquerade_cidr = "--non-masquerade-cidr=" + pillar.non_masquerade_cidr -%}
{% endif -%}
-# The master kubelet cannot wait for the flannel daemon because it is responsible
-# for starting up the flannel server in a static pod. So even though the flannel
-# daemon runs on the master, it doesn't hold up cluster bootstrap. All the pods
-# on the master run with host networking, so the master flannel doesn't care
-# even if the network changes. We only need it for the master proxy.
-{% set experimental_flannel_overlay = "" -%}
-{% if pillar.get('network_provider', '').lower() == 'flannel' and grains['roles'][0] != 'kubernetes-master' %}
-{% set experimental_flannel_overlay = "--experimental-flannel-overlay=true" %}
-{% endif -%}
# Setup cgroups hierarchies.
{% set cgroup_root = "" -%}
{% set system_container = "" -%}
@@ -209,4 +199,4 @@
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
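
The only change to DAEMON_ARGS is dropping {{experimental_flannel_overlay}}. A hedged one-liner to confirm a node no longer renders the removed flag (assumes this template's usual install path of /etc/default/kubelet):

grep -q "experimental-flannel-overlay" /etc/default/kubelet \
  && echo "stale flannel overlay flag present" || echo "clean"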

cluster/saltbase/salt/top.sls

@@ -13,9 +13,6 @@ base:
  'roles:kubernetes-pool':
    - match: grain
    - docker
-{% if pillar.get('network_provider', '').lower() == 'flannel' %}
-    - flannel
-{% endif %}
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
    - cni
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
@@ -58,10 +55,7 @@ base:
    - match: grain
    - generate-cert
    - etcd
-{% if pillar.get('network_provider', '').lower() == 'flannel' %}
-    - flannel-server
-    - flannel
-{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
+{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
    - cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
    - cni
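
With the flannel entries dropped from both the pool and master role blocks, a rendered highstate should no longer pull them in. One way to spot-check (a sketch; assumes a Salt master with standard grain targeting):

salt -G 'roles:kubernetes-master' state.show_top
salt -G 'roles:kubernetes-pool' state.show_top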