1
0
mirror of https://github.com/rancher/rke.git synced 2025-08-31 06:34:03 +00:00

Add initial support for additional network plugins

Refactor DeployNetworkPlugin()
Add calico network plugin
Add canal network plugin
This commit is contained in:
moelsayed
2017-11-22 01:49:30 +02:00
parent 0b5e65274c
commit 42b22e69b0
7 changed files with 776 additions and 13 deletions

View File

@@ -22,10 +22,7 @@ func (c *Cluster) ClusterDown() error {
}
// Clean up all hosts
if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts); err != nil {
return err
}
return nil
return cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts)
}
func cleanUpHosts(cpHosts, workerHosts, etcdHosts []hosts.Host) error {

View File

@@ -1,7 +1,11 @@
package cluster
import (
"fmt"
"github.com/rancher/rke/network"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
"github.com/sirupsen/logrus"
)
@@ -11,12 +15,43 @@ const (
func (c *Cluster) DeployNetworkPlugin() error {
logrus.Infof("[network] Setting up network plugin: %s", c.Network.Plugin)
pluginYaml := network.GetFlannelManifest(c.ClusterCIDR)
if err := c.doAddonDeploy(pluginYaml, NetworkPluginResourceName); err != nil {
return err
switch c.Network.Plugin {
case "flannel":
return c.doFlannelDeploy()
case "calico":
return c.doCalicoDeploy()
case "canal":
return c.doCanalDeploy()
default:
return fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
}
logrus.Infof("[network] Network plugin deployed successfully..")
return nil
}
// doFlannelDeploy renders the flannel manifest for this cluster's pod CIDR
// and deploys it as the network-plugin addon.
func (c *Cluster) doFlannelDeploy() error {
	manifest := network.GetFlannelManifest(c.ClusterCIDR)
	return c.doAddonDeploy(manifest, NetworkPluginResourceName)
}
// doCalicoDeploy renders the Calico manifest with this cluster's etcd
// endpoints, PKI file paths and pod CIDR, then deploys it as the
// network-plugin addon.
func (c *Cluster) doCalicoDeploy() error {
	// Template values consumed by network.GetCalicoManifest.
	cfg := map[string]string{
		"etcdEndpoints": services.GetEtcdConnString(c.EtcdHosts),
		"apiRoot":       "https://127.0.0.1:6443",
		"clientCrt":     pki.KubeNodeCertPath,
		"clientKey":     pki.KubeNodeKeyPath,
		"clientCA":      pki.CACertPath,
		"kubeCfg":       pki.KubeNodeConfigPath,
		"clusterCIDR":   c.ClusterCIDR,
	}
	return c.doAddonDeploy(network.GetCalicoManifest(cfg), NetworkPluginResourceName)
}
// doCanalDeploy renders the Canal manifest with this cluster's PKI file
// paths and pod CIDR, then deploys it as the network-plugin addon.
func (c *Cluster) doCanalDeploy() error {
	canalConfig := map[string]string{
		// The canal template also reads "apiRoot" in its CNI policy section;
		// set it the same way doCalicoDeploy does so it doesn't render empty.
		"apiRoot":     "https://127.0.0.1:6443",
		"clientCrt":   pki.KubeNodeCertPath,
		"clientKey":   pki.KubeNodeKeyPath,
		"clientCA":    pki.CACertPath,
		"kubeCfg":     pki.KubeNodeConfigPath,
		"clusterCIDR": c.ClusterCIDR,
	}
	// Bug fix: the original passed the undefined identifier "calicoConfig"
	// here (the local variable is canalConfig), which does not compile.
	pluginYaml := network.GetCanalManifest(canalConfig)
	return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}

View File

@@ -22,6 +22,7 @@ const (
ToCleanSSLDir = "/etc/kubernetes/ssl"
ToCleanCNIConf = "/etc/cni"
ToCleanCNIBin = "/opt/cni"
ToCleanCalicoRun = "/var/run/calico"
CleanerContainerName = "kube-cleaner"
CleanerImage = "alpine:latest"
)
@@ -33,6 +34,7 @@ func (h *Host) CleanUp() error {
ToCleanSSLDir,
ToCleanCNIConf,
ToCleanCNIBin,
ToCleanCalicoRun,
}
logrus.Infof("[down] Running cleaner container on host [%s]", h.AdvertisedHostname)
imageCfg, hostCfg := buildCleanerConfig(h, toCleanDirs)

399
network/calico.go Normal file
View File

@@ -0,0 +1,399 @@
package network
// GetCalicoManifest returns the Calico deployment YAML with values from
// calicoConfig spliced into the raw-string template. Keys read here:
// "etcdEndpoints", "apiRoot", "clientCrt", "clientKey", "clientCA",
// "kubeCfg" and "clusterCIDR"; a missing key renders as an empty string.
// The returned YAML itself is runtime data and must not be edited casually.
func GetCalicoManifest(calicoConfig map[string]string) string {
return `# Calico Version master
# https://docs.projectcalico.org/master/releases#master
# This manifest includes the following component versions:
# calico/node:master
# calico/cni:master
# calico/kube-controllers:master
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "` + calicoConfig["etcdEndpoints"] + `"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "rke-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"etcd_endpoints": "` + calicoConfig["etcdEndpoints"] + `",
"etcd_key_file": "",
"etcd_cert_file": "",
"etcd_ca_cert_file": "",
"log_level": "info",
"mtu": 1500,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "` + calicoConfig["apiRoot"] + `",
"k8s_client_certificate": "` + calicoConfig["clientCrt"] + `",
"k8s_client_key": "` + calicoConfig["clientKey"] + `",
"k8s_certificate_authority": "` + calicoConfig["clientCA"] + `"
},
"kubernetes": {
"kubeconfig": "` + calicoConfig["kubeCfg"] + `"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "" # "/calico-secrets/etcd-ca"
etcd_cert: "" # "/calico-secrets/etcd-cert"
etcd_key: "" # "/calico-secrets/etcd-key"
---
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following files with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# This self-hosted install expects three files with the following names. The values
# should be base64 encoded strings of the entire contents of each file.
# etcd-key: null
# etcd-cert: null
# etcd-ca: null
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
tolerations:
- key: "dedicated"
value: "master"
effect: "NoSchedule"
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.2
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Disable file logging so "kubectl logs" works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "` + calicoConfig["clusterCIDR"] + `"
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
value: "1440"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
- mountPath: /etc/kubernetes
name: etc-kubernetes
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.0
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
- mountPath: /etc/kubernetes
name: etc-kubernetes
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
serviceAccountName: calico-kube-controllers
tolerations:
- key: "dedicated"
value: "master"
effect: "NoSchedule"
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
- mountPath: /etc/kubernetes
name: etc-kubernetes
volumes:
# Mount in the etcd TLS secrets.
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
---
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# Turn this deployment off in favor of the kube-controllers deployment above.
replicas: 0
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
hostNetwork: true
serviceAccountName: calico-kube-controllers
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-controllers:v1.0.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
`
}

330
network/canal.go Normal file
View File

@@ -0,0 +1,330 @@
package network
// GetCanalManifest returns the Canal deployment YAML with values from
// canalConfig spliced into the raw-string template. Keys read here:
// "apiRoot", "clientCrt", "clientKey", "clientCA", "kubeCfg" and
// "clusterCIDR"; a missing key renders as an empty string.
// NOTE(review): the doCanalDeploy caller never sets "apiRoot", so
// k8s_api_root renders empty — confirm whether that is intended.
// The returned YAML itself is runtime data and must not be edited casually.
func GetCanalManifest(canalConfig map[string]string) string {
return `# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "rke-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_api_root": "` + canalConfig["apiRoot"] + `",
"k8s_client_certificate": "` + canalConfig["clientCrt"] + `",
"k8s_client_key": "` + canalConfig["clientKey"] + `",
"k8s_certificate_authority": "` + canalConfig["clientCA"] + `"
},
"kubernetes": {
"kubeconfig": "` + canalConfig["kubeCfg"] + `"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "` + canalConfig["clusterCIDR"] + `",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# this taint is set by all kubelets running '--cloud-provider=external'
# so we should tolerate it to schedule the canal pods
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
# Allow the pod to run on the master. This is required for
# the master to communicate with pods.
- key: "node-role.kubernetes.io/master"
operator: "Exists"
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.2
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "info"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Disable file logging so 'kubectl logs' works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /etc/kubernetes
name: etc-kubernetes
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.0
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /etc/kubernetes
name: etc-kubernetes
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.1
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
- name: etc-kubernetes
hostPath:
path: /etc/kubernetes
# Create all the CustomResourceDefinitions needed for
# Calico policy-only mode.
---
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Felix Configuration
kind: CustomResourceDefinition
metadata:
name: globalfelixconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalFelixConfig
plural: globalfelixconfigs
singular: globalfelixconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global BGP Configuration
kind: CustomResourceDefinition
metadata:
name: globalbgpconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalBGPConfig
plural: globalbgpconfigs
singular: globalbgpconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico IP Pools
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Policies
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
`
}

View File

@@ -78,7 +78,7 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster st
return imageCfg, hostCfg
}
func getEtcdConnString(hosts []hosts.Host) string {
func GetEtcdConnString(hosts []hosts.Host) string {
connString := ""
for i, host := range hosts {
connString += "http://" + host.AdvertiseAddress + ":2379"

View File

@@ -13,7 +13,7 @@ import (
)
// runKubeAPI starts the kube-apiserver container on the given host, wiring
// it to the etcd cluster via the exported GetEtcdConnString helper.
// Fix: the flattened diff left both the old getEtcdConnString call and the
// new GetEtcdConnString call, redeclaring etcdConnString; keep only the new one.
func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
	etcdConnString := GetEtcdConnString(etcdHosts)
	imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole)
}