Merge pull request #181 from moelsayed/use_templates
Use Go templates for addons, network plugins and other manifests
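The pattern applied throughout this PR: every function that previously assembled a Kubernetes manifest by string concatenation now declares the manifest once as a text/template constant and renders it from a map[string]string. A minimal standalone sketch of the before and after (shortened, hypothetical identifiers; the real functions and template constants follow in the diff):

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

// Before: the manifest is glued together from string fragments.
func manifestConcat(addonName string) string {
    return `metadata:
  name: ` + addonName + `-deploy-job`
}

// After: the manifest is a template, filled in from a map.
const manifestTemplate = `metadata:
  name: {{.AddonName}}-deploy-job`

func manifestFromTemplate(config map[string]string) (string, error) {
    out := new(bytes.Buffer)
    t := template.Must(template.New("m").Parse(manifestTemplate))
    if err := t.Execute(out, config); err != nil {
        return "", err
    }
    return out.String(), nil
}

func main() {
    fmt.Println(manifestConcat("kube-dns"))
    y, _ := manifestFromTemplate(map[string]string{"AddonName": "kube-dns"})
    fmt.Println(y)
}

Both print the same manifest fragment; the template version additionally gives every manifest-producing function an error return, which is the API change visible in the hunks below.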
@@ -1,33 +1,12 @@
 package addons
 
-func GetAddonsExcuteJob(addonName, nodeName, image string) string {
-	return `apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ` + addonName + `-deploy-job
-spec:
-  template:
-    metadata:
-      name: pi
-    spec:
-      hostNetwork: true
-      serviceAccountName: rke-job-deployer
-      nodeName: ` + nodeName + `
-      containers:
-        - name: ` + addonName + `-pod
-          image: ` + image + `
-          command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"]
-          volumeMounts:
-          - name: config-volume
-            mountPath: /etc/config
-      volumes:
-        - name: config-volume
-          configMap:
-            # Provide the name of the ConfigMap containing the files you want
-            # to add to the container
-            name: ` + addonName + `
-            items:
-              - key: ` + addonName + `
-                path: ` + addonName + `.yaml
-      restartPolicy: Never`
+import "github.com/rancher/rke/templates"
+
+func GetAddonsExcuteJob(addonName, nodeName, image string) (string, error) {
+	jobConfig := map[string]string{
+		"AddonName": addonName,
+		"NodeName":  nodeName,
+		"Image":     image,
+	}
+	return templates.CompileTemplateFromMap(templates.JobDeployerTemplate, jobConfig)
 }
@@ -17,10 +17,13 @@ const (
 )
 
 func TestJobManifest(t *testing.T) {
-	jobYaml := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage)
+	jobYaml, err := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage)
+	if err != nil {
+		t.Fatalf("Failed to get addon execute job: %v", err)
+	}
 	job := v1.Job{}
 	decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(jobYaml)))
-	err := decoder.Decode(&job)
+	err = decoder.Decode(&job)
 	if err != nil {
 		t.Fatalf("Failed To decode Job yaml: %v", err)
 	}
@@ -1,231 +1,17 @@
 package addons
 
+import "github.com/rancher/rke/templates"
+
 const (
-	KubeDNSImage           = "kubeDNSImage"
+	KubeDNSImage           = "KubeDNSImage"
 	DNSMasqImage           = "DNSMasqImage"
-	KubeDNSSidecarImage    = "kubednsSidecarImage"
-	KubeDNSAutoScalerImage = "kubeDNSAutoScalerImage"
-	KubeDNSServer          = "clusterDNSServer"
-	KubeDNSClusterDomain   = "clusterDomain"
+	KubeDNSSidecarImage    = "KubednsSidecarImage"
+	KubeDNSAutoScalerImage = "KubeDNSAutoScalerImage"
+	KubeDNSServer          = "ClusterDNSServer"
+	KubeDNSClusterDomain   = "ClusterDomain"
 )
 
-func GetKubeDNSManifest(kubeDNSConfig map[string]string) string {
-	return `---
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: kube-dns-autoscaler
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns-autoscaler
-spec:
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns-autoscaler
-    spec:
-      containers:
-      - name: autoscaler
-        image: ` + kubeDNSConfig[KubeDNSAutoScalerImage] + `
-        resources:
-          requests:
-            cpu: "20m"
-            memory: "10Mi"
-        command:
-        - /cluster-proportional-autoscaler
-        - --namespace=kube-system
-        - --configmap=kube-dns-autoscaler
-        - --target=Deployment/kube-dns
-        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
-        # If using small nodes, "nodesPerReplica" should dominate.
-        - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
-        - --logtostderr=true
-        - --v=2
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-  # replicas: not specified here:
-  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
-  # 2. Default is 1.
-  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
-  strategy:
-    rollingUpdate:
-      maxSurge: 10%
-      maxUnavailable: 0
-  selector:
-    matchLabels:
-      k8s-app: kube-dns
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-      tolerations:
-      - key: "CriticalAddonsOnly"
-        operator: "Exists"
-      volumes:
-      - name: kube-dns-config
-        configMap:
-          name: kube-dns
-          optional: true
-      containers:
-      - name: kubedns
-        image: ` + kubeDNSConfig[KubeDNSImage] + `
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            memory: 170Mi
-          requests:
-            cpu: 100m
-            memory: 70Mi
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/kubedns
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        readinessProbe:
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          # we poll on pod startup for the Kubernetes master service and
-          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 3
-          timeoutSeconds: 5
-        args:
-        - --domain=` + kubeDNSConfig[KubeDNSClusterDomain] + `.
-        - --dns-port=10053
-        - --config-dir=/kube-dns-config
-        - --v=2
-        env:
-        - name: PROMETHEUS_PORT
-          value: "10055"
-        ports:
-        - containerPort: 10053
-          name: dns-local
-          protocol: UDP
-        - containerPort: 10053
-          name: dns-tcp-local
-          protocol: TCP
-        - containerPort: 10055
-          name: metrics
-          protocol: TCP
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /kube-dns-config
-      - name: dnsmasq
-        image: ` + kubeDNSConfig[DNSMasqImage] + `
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/dnsmasq
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - -v=2
-        - -logtostderr
-        - -configDir=/etc/k8s/dns/dnsmasq-nanny
-        - -restartDnsmasq=true
-        - --
-        - -k
-        - --cache-size=1000
-        - --log-facility=-
-        - --server=/` + kubeDNSConfig[KubeDNSClusterDomain] + `/127.0.0.1#10053
-        - --server=/in-addr.arpa/127.0.0.1#10053
-        - --server=/ip6.arpa/127.0.0.1#10053
-        ports:
-        - containerPort: 53
-          name: dns
-          protocol: UDP
-        - containerPort: 53
-          name: dns-tcp
-          protocol: TCP
-        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
-        resources:
-          requests:
-            cpu: 150m
-            memory: 20Mi
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /etc/k8s/dns/dnsmasq-nanny
-      - name: sidecar
-        image: ` + kubeDNSConfig[KubeDNSSidecarImage] + `
-        livenessProbe:
-          httpGet:
-            path: /metrics
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - --v=2
-        - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
-        ports:
-        - containerPort: 10054
-          name: metrics
-          protocol: TCP
-        resources:
-          requests:
-            memory: 20Mi
-            cpu: 10m
-      dnsPolicy: Default # Don't use cluster DNS.
-      serviceAccountName: kube-dns
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-    kubernetes.io/name: "KubeDNS"
-spec:
-  selector:
-    k8s-app: kube-dns
-  clusterIP: ` + kubeDNSConfig[KubeDNSServer] + `
-  ports:
-  - name: dns
-    port: 53
-    protocol: UDP
-  - name: dns-tcp
-    port: 53
-    protocol: TCP
-
-`
+func GetKubeDNSManifest(kubeDNSConfig map[string]string) (string, error) {
+	return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, kubeDNSConfig)
 }
@@ -2,6 +2,7 @@ package authz
 
 import (
 	"github.com/rancher/rke/k8s"
+	"github.com/rancher/rke/templates"
 	"github.com/sirupsen/logrus"
 )
 
@@ -11,10 +12,10 @@ func ApplyJobDeployerServiceAccount(kubeConfigPath string) error {
 	if err != nil {
 		return err
 	}
-	if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, jobDeployerClusterRoleBinding); err != nil {
+	if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.JobDeployerClusterRoleBinding); err != nil {
 		return err
 	}
-	if err := k8s.UpdateServiceAccountFromYaml(k8sClient, jobDeployerServiceAccount); err != nil {
+	if err := k8s.UpdateServiceAccountFromYaml(k8sClient, templates.JobDeployerServiceAccount); err != nil {
 		return err
 	}
 	logrus.Infof("[authz] rke-job-deployer ServiceAccount created successfully")
@@ -27,7 +28,7 @@ func ApplySystemNodeClusterRoleBinding(kubeConfigPath string) error {
 	if err != nil {
 		return err
 	}
-	if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, systemNodeClusterRoleBinding); err != nil {
+	if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.SystemNodeClusterRoleBinding); err != nil {
 		return err
 	}
 	logrus.Infof("[authz] system:node ClusterRoleBinding created successfully")
@@ -2,6 +2,7 @@ package authz
 
 import (
 	"github.com/rancher/rke/k8s"
+	"github.com/rancher/rke/templates"
 	"github.com/sirupsen/logrus"
 )
 
@@ -11,7 +12,7 @@ func ApplyDefaultPodSecurityPolicy(kubeConfigPath string) error {
 	if err != nil {
 		return err
 	}
-	if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, DefaultPodSecurityPolicy); err != nil {
+	if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, templates.DefaultPodSecurityPolicy); err != nil {
 		return err
 	}
 	logrus.Infof("[authz] Default PodSecurityPolicy applied successfully")
@@ -24,10 +25,10 @@ func ApplyDefaultPodSecurityPolicyRole(kubeConfigPath string) error {
 	if err != nil {
 		return err
 	}
-	if err := k8s.UpdateRoleFromYaml(k8sClient, DefaultPodSecurityRole); err != nil {
+	if err := k8s.UpdateRoleFromYaml(k8sClient, templates.DefaultPodSecurityRole); err != nil {
 		return err
 	}
-	if err := k8s.UpdateRoleBindingFromYaml(k8sClient, DefaultPodSecurityRoleBinding); err != nil {
+	if err := k8s.UpdateRoleBindingFromYaml(k8sClient, templates.DefaultPodSecurityRoleBinding); err != nil {
 		return err
 	}
 	logrus.Infof("[authz] Default PodSecurityPolicy Role and RoleBinding applied successfully")
@@ -44,7 +44,10 @@ func (c *Cluster) deployKubeDNS() error {
 		addons.KubeDNSSidecarImage:    c.SystemImages[KubeDNSSidecarImage],
 		addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage],
 	}
-	kubeDNSYaml := addons.GetKubeDNSManifest(kubeDNSConfig)
+	kubeDNSYaml, err := addons.GetKubeDNSManifest(kubeDNSConfig)
+	if err != nil {
+		return err
+	}
 	if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
 		return err
 	}
@@ -62,7 +65,10 @@ func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {
 
 	logrus.Infof("[addons] Executing deploy job..")
 
-	addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image)
+	addonJob, err := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image)
+	if err != nil {
+		return fmt.Errorf("Failed to deploy addon execute job: %v", err)
+	}
 	err = c.ApplySystemAddonExcuteJob(addonJob)
 	if err != nil {
 		return fmt.Errorf("Failed to deploy addon execute job: %v", err)
@@ -3,9 +3,9 @@ package cluster
 import (
 	"fmt"
 
-	"github.com/rancher/rke/network"
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
+	"github.com/rancher/rke/templates"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,12 +17,12 @@ const (
 	FlannelCNIImage = "flannel_cni_image"
 	FlannelIface    = "flannel_iface"
 
-	CalicoNetworkPlugin     = "calico"
-	CalicoNodeImage         = "calico_node_image"
-	CalicoCNIImage          = "calico_cni_image"
-	CalicoControllersImages = "calico_controllers_image"
-	CalicoctlImage          = "calicoctl_image"
-	CalicoCloudProvider     = "calico_cloud_provider"
+	CalicoNetworkPlugin    = "calico"
+	CalicoNodeImage        = "calico_node_image"
+	CalicoCNIImage         = "calico_cni_image"
+	CalicoControllersImage = "calico_controllers_image"
+	CalicoctlImage         = "calicoctl_image"
+	CalicoCloudProvider    = "calico_cloud_provider"
 
 	CanalNetworkPlugin = "canal"
 	CanalNodeImage     = "canal_node_image"
@@ -32,6 +32,35 @@ const (
 	WeaveNetworkPlugin = "weave"
 	WeaveImage         = "weave_node_image"
 	WeaveCNIImage      = "weave_cni_image"
+
+	// List of map keys to be used with network templates
+
+	// EtcdEndpoints is the server address for Etcd, used by calico
+	EtcdEndpoints = "EtcdEndpoints"
+	// APIRoot is the kubernetes API address
+	APIRoot = "APIRoot"
+	// kubernetes client certificates and kubeconfig paths
+
+	ClientCert = "ClientCert"
+	ClientKey  = "ClientKey"
+	ClientCA   = "ClientCA"
+	KubeCfg    = "KubeCfg"
+
+	ClusterCIDR = "ClusterCIDR"
+	// Images key names
+
+	Image            = "Image"
+	CNIImage         = "CNIImage"
+	NodeImage        = "NodeImage"
+	ControllersImage = "ControllersImage"
+	CanalFlannelImg  = "CanalFlannelImg"
+
+	Calicoctl = "Calicoctl"
+
+	FlannelInterface = "FlannelInterface"
+	CloudProvider    = "CloudProvider"
+	AWSCloudProvider = "aws"
+	RBACConfig       = "RBACConfig"
 )
 
 func (c *Cluster) DeployNetworkPlugin() error {
@@ -52,60 +81,73 @@ func (c *Cluster) DeployNetworkPlugin() error {
 
 func (c *Cluster) doFlannelDeploy() error {
 	flannelConfig := map[string]string{
-		network.ClusterCIDR:     c.ClusterCIDR,
-		network.FlannelImage:    c.Network.Options[FlannelImage],
-		network.FlannelCNIImage: c.Network.Options[FlannelCNIImage],
-		network.FlannelIface:    c.Network.Options[FlannelIface],
-		network.RBACConfig:      c.Authorization.Mode,
+		ClusterCIDR:      c.ClusterCIDR,
+		Image:            c.Network.Options[FlannelImage],
+		CNIImage:         c.Network.Options[FlannelCNIImage],
+		FlannelInterface: c.Network.Options[FlannelIface],
+		RBACConfig:       c.Authorization.Mode,
 	}
-	pluginYaml := network.GetFlannelManifest(flannelConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(flannelConfig)
+	if err != nil {
+		return err
+	}
 	return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }
 
 func (c *Cluster) doCalicoDeploy() error {
 	calicoConfig := map[string]string{
-		network.EtcdEndpoints:    services.GetEtcdConnString(c.EtcdHosts),
-		network.APIRoot:          "https://127.0.0.1:6443",
-		network.ClientCert:       pki.KubeNodeCertPath,
-		network.ClientKey:        pki.KubeNodeKeyPath,
-		network.ClientCA:         pki.CACertPath,
-		network.KubeCfg:          pki.KubeNodeConfigPath,
-		network.ClusterCIDR:      c.ClusterCIDR,
-		network.CNIImage:         c.Network.Options[CalicoCNIImage],
-		network.NodeImage:        c.Network.Options[CalicoNodeImage],
-		network.ControllersImage: c.Network.Options[CalicoControllersImages],
-		network.CalicoctlImage:   c.Network.Options[CalicoctlImage],
-		network.CloudProvider:    c.Network.Options[CalicoCloudProvider],
-		network.RBACConfig:       c.Authorization.Mode,
+		EtcdEndpoints:    services.GetEtcdConnString(c.EtcdHosts),
+		APIRoot:          "https://127.0.0.1:6443",
+		ClientCert:       pki.KubeNodeCertPath,
+		ClientKey:        pki.KubeNodeKeyPath,
+		ClientCA:         pki.CACertPath,
+		KubeCfg:          pki.KubeNodeConfigPath,
+		ClusterCIDR:      c.ClusterCIDR,
+		CNIImage:         c.Network.Options[CalicoCNIImage],
+		NodeImage:        c.Network.Options[CalicoNodeImage],
+		ControllersImage: c.Network.Options[CalicoControllersImage],
+		Calicoctl:        c.Network.Options[CalicoctlImage],
+		CloudProvider:    c.Network.Options[CalicoCloudProvider],
+		RBACConfig:       c.Authorization.Mode,
 	}
-	pluginYaml := network.GetCalicoManifest(calicoConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(calicoConfig)
+	if err != nil {
+		return err
+	}
 	return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }
 
 func (c *Cluster) doCanalDeploy() error {
 	canalConfig := map[string]string{
-		network.ClientCert:   pki.KubeNodeCertPath,
-		network.ClientKey:    pki.KubeNodeKeyPath,
-		network.ClientCA:     pki.CACertPath,
-		network.KubeCfg:      pki.KubeNodeConfigPath,
-		network.ClusterCIDR:  c.ClusterCIDR,
-		network.NodeImage:    c.Network.Options[CanalNodeImage],
-		network.CNIImage:     c.Network.Options[CanalCNIImage],
-		network.FlannelImage: c.Network.Options[CanalFlannelImage],
-		network.RBACConfig:   c.Authorization.Mode,
+		ClientCert:      pki.KubeNodeCertPath,
+		APIRoot:         "https://127.0.0.1:6443",
+		ClientKey:       pki.KubeNodeKeyPath,
+		ClientCA:        pki.CACertPath,
+		KubeCfg:         pki.KubeNodeConfigPath,
+		ClusterCIDR:     c.ClusterCIDR,
+		NodeImage:       c.Network.Options[CanalNodeImage],
+		CNIImage:        c.Network.Options[CanalCNIImage],
+		CanalFlannelImg: c.Network.Options[CanalFlannelImage],
+		RBACConfig:      c.Authorization.Mode,
 	}
-	pluginYaml := network.GetCanalManifest(canalConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(canalConfig)
+	if err != nil {
+		return err
+	}
 	return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }
 
 func (c *Cluster) doWeaveDeploy() error {
 	weaveConfig := map[string]string{
-		network.ClusterCIDR:   c.ClusterCIDR,
-		network.WeaveImage:    c.Network.Options[WeaveImage],
-		network.WeaveCNIImage: c.Network.Options[WeaveCNIImage],
-		network.RBACConfig:    c.Authorization.Mode,
+		ClusterCIDR: c.ClusterCIDR,
+		Image:       c.Network.Options[WeaveImage],
+		CNIImage:    c.Network.Options[WeaveCNIImage],
+		RBACConfig:  c.Authorization.Mode,
 	}
-	pluginYaml := network.GetWeaveManifest(weaveConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(weaveConfig)
+	if err != nil {
+		return err
+	}
 	return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }
 
@@ -117,38 +159,52 @@ func (c *Cluster) setClusterNetworkDefaults() {
 		c.Network.Options = make(map[string]string)
 	}
 	networkPluginConfigDefaultsMap := make(map[string]string)
-	switch {
-	case c.Network.Plugin == FlannelNetworkPlugin:
+	switch c.Network.Plugin {
+	case FlannelNetworkPlugin:
 		networkPluginConfigDefaultsMap = map[string]string{
 			FlannelImage:    DefaultFlannelImage,
 			FlannelCNIImage: DefaultFlannelCNIImage,
 		}
 
-	case c.Network.Plugin == CalicoNetworkPlugin:
+	case CalicoNetworkPlugin:
 		networkPluginConfigDefaultsMap = map[string]string{
-			CalicoCNIImage:          DefaultCalicoCNIImage,
-			CalicoNodeImage:         DefaultCalicoNodeImage,
-			CalicoControllersImages: DefaultCalicoControllersImage,
-			CalicoCloudProvider:     DefaultNetworkCloudProvider,
-			CalicoctlImage:          DefaultCalicoctlImage,
+			CalicoCNIImage:         DefaultCalicoCNIImage,
+			CalicoNodeImage:        DefaultCalicoNodeImage,
+			CalicoControllersImage: DefaultCalicoControllersImage,
+			CalicoCloudProvider:    DefaultNetworkCloudProvider,
+			CalicoctlImage:         DefaultCalicoctlImage,
 		}
 
-	case c.Network.Plugin == CanalNetworkPlugin:
+	case CanalNetworkPlugin:
 		networkPluginConfigDefaultsMap = map[string]string{
 			CanalCNIImage:     DefaultCanalCNIImage,
 			CanalNodeImage:    DefaultCanalNodeImage,
 			CanalFlannelImage: DefaultCanalFlannelImage,
 		}
 
-	case c.Network.Plugin == WeaveNetworkPlugin:
+	case WeaveNetworkPlugin:
 		networkPluginConfigDefaultsMap = map[string]string{
 			WeaveImage:    DefaultWeaveImage,
 			WeaveCNIImage: DefaultWeaveCNIImage,
 		}
 	}
 
 	for k, v := range networkPluginConfigDefaultsMap {
 		setDefaultIfEmptyMapValue(c.Network.Options, k, v)
 	}
 
 }
+
+func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]string) (string, error) {
+	switch c.Network.Plugin {
+	case FlannelNetworkPlugin:
+		return templates.CompileTemplateFromMap(templates.FlannelTemplate, pluginConfig)
+	case CalicoNetworkPlugin:
+		return templates.CompileTemplateFromMap(templates.CalicoTemplate, pluginConfig)
+	case CanalNetworkPlugin:
+		return templates.CompileTemplateFromMap(templates.CanalTemplate, pluginConfig)
+	case WeaveNetworkPlugin:
+		return templates.CompileTemplateFromMap(templates.WeaveTemplate, pluginConfig)
+	default:
+		return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
+	}
+}
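The new getNetworkPluginManifest above reduces plugin handling to a dispatch table: plugin name in, compiled manifest out. A hedged usage sketch of the same flow (a standalone function rather than the Cluster method, assuming the rke packages are importable; image names are illustrative):

package main

import (
    "fmt"

    "github.com/rancher/rke/templates"
)

// pickTemplate mirrors getNetworkPluginManifest's switch: it maps a plugin
// name to its template constant and compiles it with the supplied values.
func pickTemplate(plugin string, config map[string]string) (string, error) {
    switch plugin {
    case "flannel":
        return templates.CompileTemplateFromMap(templates.FlannelTemplate, config)
    case "calico":
        return templates.CompileTemplateFromMap(templates.CalicoTemplate, config)
    case "canal":
        return templates.CompileTemplateFromMap(templates.CanalTemplate, config)
    case "weave":
        return templates.CompileTemplateFromMap(templates.WeaveTemplate, config)
    default:
        return "", fmt.Errorf("unsupported network plugin: %s", plugin)
    }
}

func main() {
    yaml, err := pickTemplate("flannel", map[string]string{
        "ClusterCIDR": "10.42.0.0/16",           // illustrative value
        "Image":       "example/flannel:v0.9.1", // illustrative image
        "CNIImage":    "example/flannel-cni:v0.2.0",
        "RBACConfig":  "rbac",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(yaml)
}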
@@ -1,23 +0,0 @@
-package network
-
-const (
-	EtcdEndpoints    = "etcdEndpoints"
-	APIRoot          = "apiRoot"
-	ClientCert       = "clientCert"
-	ClientKey        = "clientKey"
-	ClientCA         = "clientCA"
-	KubeCfg          = "kubeCfg"
-	ClusterCIDR      = "clusterCIDR"
-	CNIImage         = "cniImage"
-	NodeImage        = "nodeImage"
-	ControllersImage = "controllersImage"
-	CalicoctlImage   = "calicoctlImage"
-	FlannelImage     = "flannelImage"
-	FlannelCNIImage  = "flannelCNIImage"
-	FlannelIface     = "flannelIface"
-	CloudProvider    = "cloudprovider"
-	AWSCloudProvider = "aws"
-	RBACConfig       = "rbacConfig"
-	WeaveImage       = "weaveImage"
-	WeaveCNIImage    = "weaveCNIImage"
-)
@@ -1,7 +1,7 @@
-package authz
+package templates
 
 const (
-	systemNodeClusterRoleBinding = `
+	SystemNodeClusterRoleBinding = `
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
@@ -19,14 +19,14 @@ subjects:
     name: system:nodes
     apiGroup: rbac.authorization.k8s.io`
 
-	jobDeployerServiceAccount = `
+	JobDeployerServiceAccount = `
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: rke-job-deployer
   namespace: kube-system`
 
-	jobDeployerClusterRoleBinding = `
+	JobDeployerClusterRoleBinding = `
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
@@ -1,19 +1,79 @@
-package network
-
-import "github.com/rancher/rke/services"
-
-func GetCalicoManifest(calicoConfig map[string]string) string {
-	awsIPPool := ""
-	if calicoConfig[CloudProvider] == AWSCloudProvider {
-		awsIPPool = getCalicoAWSIPPoolManifest(calicoConfig)
-	}
-	rbacConfig := ""
-	if calicoConfig[RBACConfig] == services.RBACAuthorizationMode {
-		rbacConfig = getCalicoRBACManifest()
-	}
-	return rbacConfig + `
+package templates
+
+const CalicoTemplate = `
+{{if eq .RBACConfig "rbac"}}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-cni-plugin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-cni-plugin
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+    verbs:
+      - get
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-kube-controllers
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - nodes
+    verbs:
+      - watch
+      - list
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+## end rbac here
+{{end}}
 ---
 # Calico Version master
 # https://docs.projectcalico.org/master/releases#master
@@ -30,7 +90,7 @@ metadata:
   namespace: kube-system
 data:
   # Configure this with the location of your etcd cluster.
-  etcd_endpoints: "` + calicoConfig[EtcdEndpoints] + `"
+  etcd_endpoints: "{{.EtcdEndpoints}}"
 
   # Configure the Calico backend to use.
   calico_backend: "bird"
@@ -43,7 +103,7 @@ data:
         "plugins": [
           {
             "type": "calico",
-            "etcd_endpoints": "` + calicoConfig[EtcdEndpoints] + `",
+            "etcd_endpoints": "{{.EtcdEndpoints}}",
             "etcd_key_file": "",
             "etcd_cert_file": "",
             "etcd_ca_cert_file": "",
@@ -54,13 +114,13 @@ data:
           },
           "policy": {
             "type": "k8s",
-            "k8s_api_root": "` + calicoConfig[APIRoot] + `",
-            "k8s_client_certificate": "` + calicoConfig[ClientCert] + `",
-            "k8s_client_key": "` + calicoConfig[ClientKey] + `",
-            "k8s_certificate_authority": "` + calicoConfig[ClientCA] + `"
+            "k8s_api_root": "{{.APIRoot}}",
+            "k8s_client_certificate": "{{.ClientCert}}",
+            "k8s_client_key": "{{.ClientKey}}",
+            "k8s_certificate_authority": "{{.ClientCA}}"
           },
           "kubernetes": {
-            "kubeconfig": "` + calicoConfig[KubeCfg] + `"
+            "kubeconfig": "{{.KubeCfg}}"
           }
         },
         {
@@ -137,7 +197,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: ` + calicoConfig[NodeImage] + `
+          image: {{.NodeImage}}
           env:
             # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
@@ -162,7 +222,7 @@ spec:
              value: "ACCEPT"
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
-             value: "` + calicoConfig[ClusterCIDR] + `"
+             value: "{{.ClusterCIDR}}"
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Disable IPv6 on Kubernetes.
@@ -228,7 +288,7 @@ spec:
         # This container installs the Calico CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: ` + calicoConfig[CNIImage] + `
+          image: {{.CNIImage}}
           command: ["/install-cni.sh"]
           env:
             # Name of the CNI config file to create.
@@ -317,7 +377,7 @@ spec:
           operator: "Exists"
       containers:
         - name: calico-kube-controllers
-          image: ` + calicoConfig[ControllersImage] + `
+          image: {{.ControllersImage}}
           env:
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
@@ -384,7 +444,7 @@ spec:
       serviceAccountName: calico-kube-controllers
       containers:
         - name: calico-policy-controller
-          image: ` + calicoConfig[ControllersImage] + `
+          image: {{.ControllersImage}}
           env:
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
@@ -407,12 +467,10 @@ kind: ServiceAccount
 metadata:
   name: calico-node
   namespace: kube-system
-` + awsIPPool + `
-`
-}
-
-func getCalicoAWSIPPoolManifest(calicoConfig map[string]string) string {
-	return `
+
+{{if eq .CloudProvider "aws"}}
+## aws stuff here
 ---
 kind: ConfigMap
 apiVersion: v1
@@ -424,7 +482,7 @@ data:
     apiVersion: v1
     kind: ipPool
     metadata:
-      cidr: ` + calicoConfig[ClusterCIDR] + `
+      cidr: {{.ClusterCIDR}}
     spec:
       nat-outgoing: true
 ---
@@ -438,7 +496,7 @@ spec:
       restartPolicy: OnFailure
       containers:
         - name: calicoctl
-          image: ` + calicoConfig[CalicoctlImage] + `
+          image: {{.Calicoctl}}
           command: ["/bin/sh", "-c", "calicoctl apply -f aws-ippool.yaml"]
           env:
             - name: ETCD_ENDPOINTS
@@ -456,88 +514,5 @@ spec:
         items:
           - key: aws-ippool
             path: aws-ippool.yaml
-`
-}
-
-func getCalicoRBACManifest() string {
-	return `
----
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-cni-plugin
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-cni-plugin
-subjects:
-- kind: ServiceAccount
-  name: calico-cni-plugin
-  namespace: kube-system
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: calico-cni-plugin
-rules:
-  - apiGroups: [""]
-    resources:
-      - pods
-      - nodes
-    verbs:
-      - get
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-cni-plugin
-  namespace: kube-system
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-kube-controllers
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
-  name: calico-kube-controllers
-  namespace: kube-system
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: calico-kube-controllers
-rules:
-  - apiGroups:
-    - ""
-    - extensions
-    resources:
-      - pods
-      - namespaces
-      - networkpolicies
-      - nodes
-    verbs:
-      - watch
-      - list
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-`
-}
+{{end}}
+`
@@ -1,13 +1,126 @@
-package network
-
-import "github.com/rancher/rke/services"
-
-func GetCanalManifest(canalConfig map[string]string) string {
-	rbacConfig := ""
-	if canalConfig[RBACConfig] == services.RBACAuthorizationMode {
-		rbacConfig = getCanalRBACManifest()
-	}
-	return rbacConfig + `
+package templates
+
+const CanalTemplate = `
+{{if eq .RBACConfig "rbac"}}
+---
+# Calico Roles
+# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico
+rules:
+  - apiGroups: [""]
+    resources:
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - pods/status
+    verbs:
+      - update
+  - apiGroups: [""]
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups: ["extensions"]
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - globalfelixconfigs
+      - bgppeers
+      - globalbgpconfigs
+      - ippools
+      - globalnetworkpolicies
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+
+---
+
+# Flannel roles
+# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: flannel
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/status
+    verbs:
+      - patch
+---
+
+# Bind the flannel ClusterRole to the canal ServiceAccount.
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: canal-flannel
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: flannel
+subjects:
+- kind: ServiceAccount
+  name: canal
+  namespace: kube-system
+
+---
+
+# Bind the calico ClusterRole to the canal ServiceAccount.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: canal-calico
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico
+subjects:
+- kind: ServiceAccount
+  name: canal
+  namespace: kube-system
+
+## end rbac
+{{end}}
 ---
 # This ConfigMap can be used to configure a self-hosted Canal installation.
 kind: ConfigMap
@@ -42,13 +155,13 @@ data:
           },
           "policy": {
             "type": "k8s",
-            "k8s_api_root": "` + canalConfig[APIRoot] + `",
-            "k8s_client_certificate": "` + canalConfig[ClientCert] + `",
-            "k8s_client_key": "` + canalConfig[ClientKey] + `",
-            "k8s_certificate_authority": "` + canalConfig[ClientCA] + `"
+            "k8s_api_root": "{{.APIRoot}}",
+            "k8s_client_certificate": "{{.ClientCert}}",
+            "k8s_client_key": "{{.ClientKey}}",
+            "k8s_certificate_authority": "{{.ClientCA}}"
           },
           "kubernetes": {
-            "kubeconfig": "` + canalConfig[KubeCfg] + `"
+            "kubeconfig": "{{.KubeCfg}}"
           }
         },
         {
@@ -62,7 +175,7 @@ data:
   # Flannel network configuration. Mounted into the flannel container.
   net-conf.json: |
     {
-      "Network": "` + canalConfig[ClusterCIDR] + `",
+      "Network": "{{.ClusterCIDR}}",
       "Backend": {
         "Type": "vxlan"
       }
@@ -114,7 +227,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: ` + canalConfig[NodeImage] + `
+          image: {{.NodeImage}}
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
@@ -181,7 +294,7 @@ spec:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
-          image: ` + canalConfig[CNIImage] + `
+          image: {{.CNIImage}}
          command: ["/install-cni.sh"]
          env:
            - name: CNI_CONF_NAME
@@ -206,7 +319,7 @@ spec:
        # This container runs flannel using the kube-subnet-mgr backend
        # for allocating subnets.
        - name: kube-flannel
-          image: ` + canalConfig[FlannelImage] + `
+          image: {{.CanalFlannelImg}}
          command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
          securityContext:
            privileged: true
@@ -333,126 +446,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: canal
-  namespace: kube-system
-`
-}
-
-func getCanalRBACManifest() string {
-	return `
-# Calico Roles
-# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: calico
-rules:
-  - apiGroups: [""]
-    resources:
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups: [""]
-    resources:
-      - pods/status
-    verbs:
-      - update
-  - apiGroups: [""]
-    resources:
-      - pods
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups: [""]
-    resources:
-      - nodes
-    verbs:
-      - get
-      - list
-      - update
-      - watch
-  - apiGroups: ["extensions"]
-    resources:
-      - networkpolicies
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - globalfelixconfigs
-      - bgppeers
-      - globalbgpconfigs
-      - ippools
-      - globalnetworkpolicies
-    verbs:
-      - create
-      - get
-      - list
-      - update
-      - watch
-
----
-
-# Flannel roles
-# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
----
-
-# Bind the flannel ClusterRole to the canal ServiceAccount.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: canal-flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: canal
-  namespace: kube-system
-
----
-
-# Bind the calico ClusterRole to the canal ServiceAccount.
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: canal-calico
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico
-subjects:
-- kind: ServiceAccount
-  name: canal
-  namespace: kube-system
-
-`
-}
+  namespace: kube-system`
@@ -1,21 +1,46 @@
-package network
-
-import (
-	"fmt"
-
-	"github.com/rancher/rke/services"
-)
-
-func GetFlannelManifest(flannelConfig map[string]string) string {
-	var extraArgs string
-	if len(flannelConfig[FlannelIface]) > 0 {
-		extraArgs = fmt.Sprintf(",--iface=%s", flannelConfig[FlannelIface])
-	}
-	rbacConfig := ""
-	if flannelConfig[RBACConfig] == services.RBACAuthorizationMode {
-		rbacConfig = getFlannelRBACManifest()
-	}
-	return rbacConfig + `
+package templates
+
+const FlannelTemplate = `
+{{- if eq .RBACConfig "rbac"}}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: flannel
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: flannel
+subjects:
+- kind: ServiceAccount
+  name: flannel
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: flannel
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/status
+    verbs:
+      - patch
+{{- end}}
 ---
 kind: ConfigMap
 apiVersion: v1
@@ -48,7 +73,7 @@ data:
     }
   net-conf.json: |
     {
-      "Network": "` + flannelConfig[ClusterCIDR] + `",
+      "Network": "{{.ClusterCIDR}}",
       "Backend": {
         "Type": "vxlan"
       }
@@ -72,7 +97,7 @@ spec:
       serviceAccountName: flannel
       containers:
       - name: kube-flannel
-        image: ` + flannelConfig[FlannelImage] + `
+        image: {{.Image}}
        imagePullPolicy: IfNotPresent
        resources:
          limits:
@@ -81,7 +106,11 @@ spec:
          requests:
            cpu: 150m
            memory: 64M
-        command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"` + extraArgs + `]
+        {{- if .FlannelInterface}}
+        command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"]
+        {{- else}}
+        command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"]
+        {{- end}}
        securityContext:
          privileged: true
        env:
@@ -101,7 +130,7 @@ spec:
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
       - name: install-cni
-        image: ` + flannelConfig[FlannelCNIImage] + `
+        image: {{.CNIImage}}
        command: ["/install-cni.sh"]
        env:
          # The CNI network config to install on each node.
@@ -145,46 +174,3 @@ kind: ServiceAccount
 metadata:
   name: flannel
   namespace: kube-system`
-}
-
-func getFlannelRBACManifest() string {
-	return `
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: flannel
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: flannel
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch`
-}
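Note how the flannel change replaces string splicing with a template conditional: the old code built extraArgs with fmt.Sprintf and spliced it into the command array, while the new FlannelTemplate branches on {{- if .FlannelInterface}}; an absent map key renders as the empty string, which the conditional treats as false. A standalone sketch of that semantics (a hypothetical two-branch template, not the real FlannelTemplate):

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

const cmdTemplate = `{{- if .FlannelInterface}}
command: ["flanneld","--ip-masq","--iface={{.FlannelInterface}}"]
{{- else}}
command: ["flanneld","--ip-masq"]
{{- end}}`

func render(config map[string]string) string {
    out := new(bytes.Buffer)
    t := template.Must(template.New("cmd").Parse(cmdTemplate))
    if err := t.Execute(out, config); err != nil {
        panic(err)
    }
    return out.String()
}

func main() {
    fmt.Println(render(map[string]string{"FlannelInterface": "eth1"}))
    fmt.Println(render(map[string]string{})) // key absent: else branch is taken
}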
templates/job-deployer.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package templates
+
+const JobDeployerTemplate = `
+{{- $addonName := .AddonName }}
+{{- $nodeName := .NodeName }}
+{{- $image := .Image }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{$addonName}}-deploy-job
+spec:
+  template:
+    metadata:
+      name: pi
+    spec:
+      hostNetwork: true
+      serviceAccountName: rke-job-deployer
+      nodeName: {{$nodeName}}
+      containers:
+        - name: {{$addonName}}-pod
+          image: {{$image}}
+          command: [ "kubectl", "apply", "-f" , "/etc/config/{{$addonName}}.yaml"]
+          volumeMounts:
+          - name: config-volume
+            mountPath: /etc/config
+      volumes:
+        - name: config-volume
+          configMap:
+            # Provide the name of the ConfigMap containing the files you want
+            # to add to the container
+            name: {{$addonName}}
+            items:
+              - key: {{$addonName}}
+                path: {{$addonName}}.yaml
+      restartPolicy: Never`
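JobDeployerTemplate binds each field of the data map to a template variable once at the top ({{- $addonName := .AddonName }}) and then reuses the variable at every substitution site; functionally this matches writing {{.AddonName}} each time, it just names the value once. A quick sketch of rendering it through the helper defined later in this PR (sample values are illustrative, not taken from the diff):

package main

import (
    "fmt"

    "github.com/rancher/rke/templates"
)

func main() {
    // Render the addon deploy job for a hypothetical addon, node, and image.
    yaml, err := templates.CompileTemplateFromMap(templates.JobDeployerTemplate, map[string]string{
        "AddonName": "kube-dns",              // becomes kube-dns-deploy-job, the ConfigMap name, etc.
        "NodeName":  "controlplane-0",        // illustrative node name
        "Image":     "example/kubectl:v1.8",  // illustrative image
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(yaml)
}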
templates/kubedns.go (new file, 219 lines)
@@ -0,0 +1,219 @@
+package templates
+
+const KubeDNSTemplate = `
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: kube-dns-autoscaler
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns-autoscaler
+spec:
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns-autoscaler
+    spec:
+      containers:
+      - name: autoscaler
+        image: {{.KubeDNSAutoScalerImage}}
+        resources:
+          requests:
+            cpu: "20m"
+            memory: "10Mi"
+        command:
+        - /cluster-proportional-autoscaler
+        - --namespace=kube-system
+        - --configmap=kube-dns-autoscaler
+        - --target=Deployment/kube-dns
+        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
+        # If using small nodes, "nodesPerReplica" should dominate.
+        - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
+        - --logtostderr=true
+        - --v=2
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  # replicas: not specified here:
+  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+  # 2. Default is 1.
+  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
+      containers:
+      - name: kubedns
+        image: {{.KubeDNSImage}}
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/kubedns
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 3
+          timeoutSeconds: 5
+        args:
+        - --domain={{.ClusterDomain}}.
+        - --dns-port=10053
+        - --config-dir=/kube-dns-config
+        - --v=2
+        env:
+        - name: PROMETHEUS_PORT
+          value: "10055"
+        ports:
+        - containerPort: 10053
+          name: dns-local
+          protocol: UDP
+        - containerPort: 10053
+          name: dns-tcp-local
+          protocol: TCP
+        - containerPort: 10055
+          name: metrics
+          protocol: TCP
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
+      - name: dnsmasq
+        image: {{.DNSMasqImage}}
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        args:
+        - -v=2
+        - -logtostderr
+        - -configDir=/etc/k8s/dns/dnsmasq-nanny
+        - -restartDnsmasq=true
+        - --
+        - -k
+        - --cache-size=1000
+        - --log-facility=-
+        - --server=/{{.ClusterDomain}}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
+        resources:
+          requests:
+            cpu: 150m
+            memory: 20Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /etc/k8s/dns/dnsmasq-nanny
+      - name: sidecar
+        image: {{.KubednsSidecarImage}}
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        args:
+        - --v=2
+        - --logtostderr
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
+        ports:
+        - containerPort: 10054
+          name: metrics
+          protocol: TCP
+        resources:
+          requests:
+            memory: 20Mi
+            cpu: 10m
+      dnsPolicy: Default # Don't use cluster DNS.
+      serviceAccountName: kube-dns
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP: {{.ClusterDNSServer}}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP`
templates/templates.go (new file, 15 lines)
@@ -0,0 +1,15 @@
+package templates
+
+import (
+	"bytes"
+	"text/template"
+)
+
+func CompileTemplateFromMap(tmplt string, configMap map[string]string) (string, error) {
+	out := new(bytes.Buffer)
+	t := template.Must(template.New("compiled_template").Parse(tmplt))
+	if err := t.Execute(out, configMap); err != nil {
+		return "", err
+	}
+	return out.String(), nil
+}
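CompileTemplateFromMap is the single compilation path for every template constant added in this PR. One design note: template.Must panics if the template fails to parse, which is defensible here because every input is a package-level constant, so a parse failure is a programming error; only execution errors are returned to the caller. A standalone sketch of that split (hypothetical templates, same structure as the function above):

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

func compile(tmplt string, data map[string]string) (string, error) {
    out := new(bytes.Buffer)
    t := template.Must(template.New("t").Parse(tmplt)) // parse error -> panic
    if err := t.Execute(out, data); err != nil {       // execute error -> returned
        return "", err
    }
    return out.String(), nil
}

func main() {
    ok, err := compile(`name: {{.Name}}`, map[string]string{"Name": "demo"})
    fmt.Println(ok, err) // name: demo <nil>

    // For maps, a missing key is not an execution error; it renders empty.
    empty, err := compile(`name: {{.Missing}}`, map[string]string{})
    fmt.Printf("%q %v\n", empty, err) // "name: " <nil>

    // compile("{{") would panic inside template.Must with a parse error.
}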
@@ -1,13 +1,7 @@
-package network
-
-import "github.com/rancher/rke/services"
-
-func GetWeaveManifest(weaveConfig map[string]string) string {
-	rbacConfig := ""
-	if weaveConfig[RBACConfig] == services.RBACAuthorizationMode {
-		rbacConfig = getWeaveRBACManifest()
-	}
-	return `
+package templates
+
+const WeaveTemplate = `
 ---
 # This ConfigMap can be used to configure a self-hosted Weave Net installation.
 apiVersion: v1
 kind: List
@@ -41,8 +35,8 @@ items:
                 apiVersion: v1
                 fieldPath: spec.nodeName
           - name: IPALLOC_RANGE
-            value: "` + weaveConfig[ClusterCIDR] + `"
-          image: ` + weaveConfig[WeaveImage] + `
+            value: "{{.ClusterCIDR}}"
+          image: {{.Image}}
           livenessProbe:
             httpGet:
               host: 127.0.0.1
@@ -77,7 +71,7 @@ items:
              fieldRef:
                apiVersion: v1
                fieldPath: spec.nodeName
-          image: ` + weaveConfig[WeaveCNIImage] + `
+          image: {{.CNIImage}}
          resources:
            requests:
              cpu: 10m
@@ -119,12 +113,7 @@ items:
            path: /run/xtables.lock
   updateStrategy:
     type: RollingUpdate
-
-` + rbacConfig
-}
-
-func getWeaveRBACManifest() string {
-	return `
+{{- if eq .RBACConfig "rbac"}}
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -213,6 +202,6 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: weave-net
-  namespace: kube-system`
-
-}
+  namespace: kube-system
+{{- end}}
+`