
Merge pull request #181 from moelsayed/use_templates

Use Go templates for addons, network plugins and other manifests
This commit is contained in:
Alena Prokharchyk
2018-01-02 13:30:12 -08:00
committed by GitHub
16 changed files with 702 additions and 683 deletions
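In short: manifests that were previously built by concatenating Go strings are now compiled from text/template constants through a single helper, templates.CompileTemplateFromMap, added in templates/templates.go below. A minimal illustrative sketch of the pattern (the inline template and values here are made up for illustration; only the helper matches this commit):

package main

import (
	"fmt"

	"github.com/rancher/rke/templates"
)

func main() {
	// Illustrative template and values only; the real templates (JobDeployerTemplate,
	// KubeDNSTemplate, CalicoTemplate, ...) are defined in the new templates package below.
	manifest, err := templates.CompileTemplateFromMap(
		"image: {{.Image}}\nnodeName: {{.NodeName}}",
		map[string]string{"Image": "example/image:tag", "NodeName": "node-1"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest)
}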

View File

@@ -1,33 +1,12 @@
package addons
+import "github.com/rancher/rke/templates"
-func GetAddonsExcuteJob(addonName, nodeName, image string) string {
-return `apiVersion: batch/v1
-kind: Job
-metadata:
-name: ` + addonName + `-deploy-job
-spec:
-template:
-metadata:
-name: pi
-spec:
-hostNetwork: true
-serviceAccountName: rke-job-deployer
-nodeName: ` + nodeName + `
-containers:
-- name: ` + addonName + `-pod
-image: ` + image + `
-command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"]
-volumeMounts:
-- name: config-volume
-mountPath: /etc/config
-volumes:
-- name: config-volume
-configMap:
-# Provide the name of the ConfigMap containing the files you want
-# to add to the container
-name: ` + addonName + `
-items:
-- key: ` + addonName + `
-path: ` + addonName + `.yaml
-restartPolicy: Never`
+func GetAddonsExcuteJob(addonName, nodeName, image string) (string, error) {
+jobConfig := map[string]string{
+"AddonName": addonName,
+"NodeName": nodeName,
+"Image": image,
+}
+return templates.CompileTemplateFromMap(templates.JobDeployerTemplate, jobConfig)
}

View File

@@ -17,10 +17,13 @@ const (
)
func TestJobManifest(t *testing.T) {
-jobYaml := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage)
+jobYaml, err := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage)
+if err != nil {
+t.Fatalf("Failed to get addon execute job: %v", err)
+}
job := v1.Job{}
decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(jobYaml)))
-err := decoder.Decode(&job)
+err = decoder.Decode(&job)
if err != nil {
t.Fatalf("Failed To decode Job yaml: %v", err)
}

View File

@@ -1,231 +1,17 @@
package addons
+import "github.com/rancher/rke/templates"
const (
-KubeDNSImage = "kubeDNSImage"
+KubeDNSImage = "KubeDNSImage"
DNSMasqImage = "DNSMasqImage"
-KubeDNSSidecarImage = "kubednsSidecarImage"
+KubeDNSSidecarImage = "KubednsSidecarImage"
-KubeDNSAutoScalerImage = "kubeDNSAutoScalerImage"
+KubeDNSAutoScalerImage = "KubeDNSAutoScalerImage"
-KubeDNSServer = "clusterDNSServer"
+KubeDNSServer = "ClusterDNSServer"
-KubeDNSClusterDomain = "clusterDomain"
+KubeDNSClusterDomain = "ClusterDomain"
)
-func GetKubeDNSManifest(kubeDNSConfig map[string]string) string {
+func GetKubeDNSManifest(kubeDNSConfig map[string]string) (string, error) {
return `---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
spec:
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
spec:
containers:
- name: autoscaler
image: ` + kubeDNSConfig[KubeDNSAutoScalerImage] + `
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
- --logtostderr=true
- --v=2
---
+return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, kubeDNSConfig)
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: ` + kubeDNSConfig[KubeDNSImage] + `
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=` + kubeDNSConfig[KubeDNSClusterDomain] + `.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: ` + kubeDNSConfig[DNSMasqImage] + `
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/` + kubeDNSConfig[KubeDNSClusterDomain] + `/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: ` + kubeDNSConfig[KubeDNSSidecarImage] + `
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: ` + kubeDNSConfig[KubeDNSServer] + `
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
`
}

View File

@@ -2,6 +2,7 @@ package authz
import (
"github.com/rancher/rke/k8s"
+"github.com/rancher/rke/templates"
"github.com/sirupsen/logrus"
)
@@ -11,10 +12,10 @@ func ApplyJobDeployerServiceAccount(kubeConfigPath string) error {
if err != nil {
return err
}
-if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, jobDeployerClusterRoleBinding); err != nil {
+if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.JobDeployerClusterRoleBinding); err != nil {
return err
}
-if err := k8s.UpdateServiceAccountFromYaml(k8sClient, jobDeployerServiceAccount); err != nil {
+if err := k8s.UpdateServiceAccountFromYaml(k8sClient, templates.JobDeployerServiceAccount); err != nil {
return err
}
logrus.Infof("[authz] rke-job-deployer ServiceAccount created successfully")
@@ -27,7 +28,7 @@ func ApplySystemNodeClusterRoleBinding(kubeConfigPath string) error {
if err != nil {
return err
}
-if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, systemNodeClusterRoleBinding); err != nil {
+if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.SystemNodeClusterRoleBinding); err != nil {
return err
}
logrus.Infof("[authz] system:node ClusterRoleBinding created successfully")

View File

@@ -2,6 +2,7 @@ package authz
import (
"github.com/rancher/rke/k8s"
+"github.com/rancher/rke/templates"
"github.com/sirupsen/logrus"
)
@@ -11,7 +12,7 @@ func ApplyDefaultPodSecurityPolicy(kubeConfigPath string) error {
if err != nil {
return err
}
-if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, DefaultPodSecurityPolicy); err != nil {
+if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, templates.DefaultPodSecurityPolicy); err != nil {
return err
}
logrus.Infof("[authz] Default PodSecurityPolicy applied successfully")
@@ -24,10 +25,10 @@ func ApplyDefaultPodSecurityPolicyRole(kubeConfigPath string) error {
if err != nil {
return err
}
-if err := k8s.UpdateRoleFromYaml(k8sClient, DefaultPodSecurityRole); err != nil {
+if err := k8s.UpdateRoleFromYaml(k8sClient, templates.DefaultPodSecurityRole); err != nil {
return err
}
-if err := k8s.UpdateRoleBindingFromYaml(k8sClient, DefaultPodSecurityRoleBinding); err != nil {
+if err := k8s.UpdateRoleBindingFromYaml(k8sClient, templates.DefaultPodSecurityRoleBinding); err != nil {
return err
}
logrus.Infof("[authz] Default PodSecurityPolicy Role and RoleBinding applied successfully")

View File

@@ -44,7 +44,10 @@ func (c *Cluster) deployKubeDNS() error {
addons.KubeDNSSidecarImage: c.SystemImages[KubeDNSSidecarImage],
addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage],
}
-kubeDNSYaml := addons.GetKubeDNSManifest(kubeDNSConfig)
+kubeDNSYaml, err := addons.GetKubeDNSManifest(kubeDNSConfig)
+if err != nil {
+return err
+}
if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
return err
}
@@ -62,7 +65,10 @@ func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {
logrus.Infof("[addons] Executing deploy job..") logrus.Infof("[addons] Executing deploy job..")
addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image) addonJob, err := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image)
if err != nil {
return fmt.Errorf("Failed to deploy addon execute job: %v", err)
}
err = c.ApplySystemAddonExcuteJob(addonJob) err = c.ApplySystemAddonExcuteJob(addonJob)
if err != nil { if err != nil {
return fmt.Errorf("Failed to deploy addon execute job: %v", err) return fmt.Errorf("Failed to deploy addon execute job: %v", err)

View File

@@ -3,9 +3,9 @@ package cluster
import (
"fmt"
-"github.com/rancher/rke/network"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
+"github.com/rancher/rke/templates"
"github.com/sirupsen/logrus"
)
@@ -20,7 +20,7 @@ const (
CalicoNetworkPlugin = "calico" CalicoNetworkPlugin = "calico"
CalicoNodeImage = "calico_node_image" CalicoNodeImage = "calico_node_image"
CalicoCNIImage = "calico_cni_image" CalicoCNIImage = "calico_cni_image"
CalicoControllersImages = "calico_controllers_image" CalicoControllersImage = "calico_controllers_image"
CalicoctlImage = "calicoctl_image" CalicoctlImage = "calicoctl_image"
CalicoCloudProvider = "calico_cloud_provider" CalicoCloudProvider = "calico_cloud_provider"
@@ -32,6 +32,35 @@ const (
WeaveNetworkPlugin = "weave" WeaveNetworkPlugin = "weave"
WeaveImage = "weave_node_image" WeaveImage = "weave_node_image"
WeaveCNIImage = "weave_cni_image" WeaveCNIImage = "weave_cni_image"
// List of map keys to be used with network templates
// EtcdEndpoints is the server address for Etcd, used by calico
EtcdEndpoints = "EtcdEndpoints"
// APIRoot is the kubernetes API address
APIRoot = "APIRoot"
// kubernetes client certificates and kubeconfig paths
ClientCert = "ClientCert"
ClientKey = "ClientKey"
ClientCA = "ClientCA"
KubeCfg = "KubeCfg"
ClusterCIDR = "ClusterCIDR"
// Images key names
Image = "Image"
CNIImage = "CNIImage"
NodeImage = "NodeImage"
ControllersImage = "ControllersImage"
CanalFlannelImg = "CanalFlannelImg"
Calicoctl = "Calicoctl"
FlannelInterface = "FlannelInterface"
CloudProvider = "CloudProvider"
AWSCloudProvider = "aws"
RBACConfig = "RBACConfig"
)
func (c *Cluster) DeployNetworkPlugin() error {
@@ -52,60 +81,73 @@ func (c *Cluster) DeployNetworkPlugin() error {
func (c *Cluster) doFlannelDeploy() error {
flannelConfig := map[string]string{
-network.ClusterCIDR: c.ClusterCIDR,
-network.FlannelImage: c.Network.Options[FlannelImage],
-network.FlannelCNIImage: c.Network.Options[FlannelCNIImage],
-network.FlannelIface: c.Network.Options[FlannelIface],
-network.RBACConfig: c.Authorization.Mode,
+ClusterCIDR: c.ClusterCIDR,
+Image: c.Network.Options[FlannelImage],
+CNIImage: c.Network.Options[FlannelCNIImage],
+FlannelInterface: c.Network.Options[FlannelIface],
+RBACConfig: c.Authorization.Mode,
}
-pluginYaml := network.GetFlannelManifest(flannelConfig)
+pluginYaml, err := c.getNetworkPluginManifest(flannelConfig)
+if err != nil {
+return err
+}
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}
func (c *Cluster) doCalicoDeploy() error {
calicoConfig := map[string]string{
-network.EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts),
-network.APIRoot: "https://127.0.0.1:6443",
-network.ClientCert: pki.KubeNodeCertPath,
-network.ClientKey: pki.KubeNodeKeyPath,
-network.ClientCA: pki.CACertPath,
-network.KubeCfg: pki.KubeNodeConfigPath,
-network.ClusterCIDR: c.ClusterCIDR,
-network.CNIImage: c.Network.Options[CalicoCNIImage],
-network.NodeImage: c.Network.Options[CalicoNodeImage],
-network.ControllersImage: c.Network.Options[CalicoControllersImages],
-network.CalicoctlImage: c.Network.Options[CalicoctlImage],
-network.CloudProvider: c.Network.Options[CalicoCloudProvider],
-network.RBACConfig: c.Authorization.Mode,
+EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts),
+APIRoot: "https://127.0.0.1:6443",
+ClientCert: pki.KubeNodeCertPath,
+ClientKey: pki.KubeNodeKeyPath,
+ClientCA: pki.CACertPath,
+KubeCfg: pki.KubeNodeConfigPath,
+ClusterCIDR: c.ClusterCIDR,
+CNIImage: c.Network.Options[CalicoCNIImage],
+NodeImage: c.Network.Options[CalicoNodeImage],
+ControllersImage: c.Network.Options[CalicoControllersImage],
+Calicoctl: c.Network.Options[CalicoctlImage],
+CloudProvider: c.Network.Options[CalicoCloudProvider],
+RBACConfig: c.Authorization.Mode,
}
-pluginYaml := network.GetCalicoManifest(calicoConfig)
+pluginYaml, err := c.getNetworkPluginManifest(calicoConfig)
+if err != nil {
+return err
+}
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}
func (c *Cluster) doCanalDeploy() error {
canalConfig := map[string]string{
-network.ClientCert: pki.KubeNodeCertPath,
-network.ClientKey: pki.KubeNodeKeyPath,
-network.ClientCA: pki.CACertPath,
-network.KubeCfg: pki.KubeNodeConfigPath,
-network.ClusterCIDR: c.ClusterCIDR,
-network.NodeImage: c.Network.Options[CanalNodeImage],
-network.CNIImage: c.Network.Options[CanalCNIImage],
-network.FlannelImage: c.Network.Options[CanalFlannelImage],
-network.RBACConfig: c.Authorization.Mode,
+ClientCert: pki.KubeNodeCertPath,
+APIRoot: "https://127.0.0.1:6443",
+ClientKey: pki.KubeNodeKeyPath,
+ClientCA: pki.CACertPath,
+KubeCfg: pki.KubeNodeConfigPath,
+ClusterCIDR: c.ClusterCIDR,
+NodeImage: c.Network.Options[CanalNodeImage],
+CNIImage: c.Network.Options[CanalCNIImage],
+CanalFlannelImg: c.Network.Options[CanalFlannelImage],
+RBACConfig: c.Authorization.Mode,
}
-pluginYaml := network.GetCanalManifest(canalConfig)
+pluginYaml, err := c.getNetworkPluginManifest(canalConfig)
+if err != nil {
+return err
+}
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}
func (c *Cluster) doWeaveDeploy() error {
weaveConfig := map[string]string{
-network.ClusterCIDR: c.ClusterCIDR,
-network.WeaveImage: c.Network.Options[WeaveImage],
-network.WeaveCNIImage: c.Network.Options[WeaveCNIImage],
-network.RBACConfig: c.Authorization.Mode,
+ClusterCIDR: c.ClusterCIDR,
+Image: c.Network.Options[WeaveImage],
+CNIImage: c.Network.Options[WeaveCNIImage],
+RBACConfig: c.Authorization.Mode,
}
-pluginYaml := network.GetWeaveManifest(weaveConfig)
+pluginYaml, err := c.getNetworkPluginManifest(weaveConfig)
+if err != nil {
+return err
+}
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}
@@ -117,38 +159,52 @@ func (c *Cluster) setClusterNetworkDefaults() {
c.Network.Options = make(map[string]string)
}
networkPluginConfigDefaultsMap := make(map[string]string)
-switch {
-case c.Network.Plugin == FlannelNetworkPlugin:
+switch c.Network.Plugin {
+case FlannelNetworkPlugin:
networkPluginConfigDefaultsMap = map[string]string{
FlannelImage: DefaultFlannelImage,
FlannelCNIImage: DefaultFlannelCNIImage,
}
-case c.Network.Plugin == CalicoNetworkPlugin:
+case CalicoNetworkPlugin:
networkPluginConfigDefaultsMap = map[string]string{
CalicoCNIImage: DefaultCalicoCNIImage,
CalicoNodeImage: DefaultCalicoNodeImage,
-CalicoControllersImages: DefaultCalicoControllersImage,
+CalicoControllersImage: DefaultCalicoControllersImage,
CalicoCloudProvider: DefaultNetworkCloudProvider,
CalicoctlImage: DefaultCalicoctlImage,
}
-case c.Network.Plugin == CanalNetworkPlugin:
+case CanalNetworkPlugin:
networkPluginConfigDefaultsMap = map[string]string{
CanalCNIImage: DefaultCanalCNIImage,
CanalNodeImage: DefaultCanalNodeImage,
CanalFlannelImage: DefaultCanalFlannelImage,
}
-case c.Network.Plugin == WeaveNetworkPlugin:
+case WeaveNetworkPlugin:
networkPluginConfigDefaultsMap = map[string]string{
WeaveImage: DefaultWeaveImage,
WeaveCNIImage: DefaultWeaveCNIImage,
}
}
for k, v := range networkPluginConfigDefaultsMap {
setDefaultIfEmptyMapValue(c.Network.Options, k, v)
}
}
func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]string) (string, error) {
switch c.Network.Plugin {
case FlannelNetworkPlugin:
return templates.CompileTemplateFromMap(templates.FlannelTemplate, pluginConfig)
case CalicoNetworkPlugin:
return templates.CompileTemplateFromMap(templates.CalicoTemplate, pluginConfig)
case CanalNetworkPlugin:
return templates.CompileTemplateFromMap(templates.CanalTemplate, pluginConfig)
case WeaveNetworkPlugin:
return templates.CompileTemplateFromMap(templates.WeaveTemplate, pluginConfig)
default:
return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
}
}
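Each do*Deploy helper above now just builds a map whose keys match the {{.Field}} references in the chosen template; the RBAC objects are emitted only when RBACConfig (i.e. c.Authorization.Mode) is "rbac", thanks to the {{if eq .RBACConfig "rbac"}} guard inside the templates. A rough standalone sketch of that behaviour (the CIDR and image names are placeholders, not defaults from this commit):

package main

import (
	"fmt"

	"github.com/rancher/rke/templates"
)

func main() {
	flannelConfig := map[string]string{
		"ClusterCIDR":      "10.42.0.0/16",               // placeholder
		"Image":            "example/flannel:v0.0.0",     // placeholder image
		"CNIImage":         "example/flannel-cni:v0.0.0", // placeholder image
		"FlannelInterface": "",                           // empty -> template omits --iface
		"RBACConfig":       "rbac",                       // "" -> ClusterRole/Binding section is skipped
	}
	manifest, err := templates.CompileTemplateFromMap(templates.FlannelTemplate, flannelConfig)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest)
}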

View File

@@ -1,23 +0,0 @@
package network
const (
EtcdEndpoints = "etcdEndpoints"
APIRoot = "apiRoot"
ClientCert = "clientCert"
ClientKey = "clientKey"
ClientCA = "clientCA"
KubeCfg = "kubeCfg"
ClusterCIDR = "clusterCIDR"
CNIImage = "cniImage"
NodeImage = "nodeImage"
ControllersImage = "controllersImage"
CalicoctlImage = "calicoctlImage"
FlannelImage = "flannelImage"
FlannelCNIImage = "flannelCNIImage"
FlannelIface = "flannelIface"
CloudProvider = "cloudprovider"
AWSCloudProvider = "aws"
RBACConfig = "rbacConfig"
WeaveImage = "weaveImage"
WeaveCNIImage = "weaveCNIImage"
)

View File

@@ -1,7 +1,7 @@
-package authz
+package templates
const (
-systemNodeClusterRoleBinding = `
+SystemNodeClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -19,14 +19,14 @@ subjects:
name: system:nodes
apiGroup: rbac.authorization.k8s.io`
-jobDeployerServiceAccount = `
+JobDeployerServiceAccount = `
apiVersion: v1
kind: ServiceAccount
metadata:
name: rke-job-deployer
namespace: kube-system`
-jobDeployerClusterRoleBinding = `
+JobDeployerClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:

View File

@@ -1,19 +1,79 @@
-package network
-import "github.com/rancher/rke/services"
+package templates
+const CalicoTemplate = `
{{if eq .RBACConfig "rbac"}}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
-func GetCalicoManifest(calicoConfig map[string]string) string {
-awsIPPool := ""
-if calicoConfig[CloudProvider] == AWSCloudProvider {
-awsIPPool = getCalicoAWSIPPoolManifest(calicoConfig)
-}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
- nodes
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
rbacConfig := "" ## end rbac here
if calicoConfig[RBACConfig] == services.RBACAuthorizationMode { {{end}}
rbacConfig = getCalicoRBACManifest()
}
return rbacConfig + `
--- ---
# Calico Version master
# https://docs.projectcalico.org/master/releases#master
@@ -30,7 +90,7 @@ metadata:
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
-etcd_endpoints: "` + calicoConfig[EtcdEndpoints] + `"
+etcd_endpoints: "{{.EtcdEndpoints}}"
# Configure the Calico backend to use.
calico_backend: "bird"
@@ -43,7 +103,7 @@ data:
"plugins": [ "plugins": [
{ {
"type": "calico", "type": "calico",
"etcd_endpoints": "` + calicoConfig[EtcdEndpoints] + `", "etcd_endpoints": "{{.EtcdEndpoints}}",
"etcd_key_file": "", "etcd_key_file": "",
"etcd_cert_file": "", "etcd_cert_file": "",
"etcd_ca_cert_file": "", "etcd_ca_cert_file": "",
@@ -54,13 +114,13 @@ data:
},
"policy": {
"type": "k8s",
-"k8s_api_root": "` + calicoConfig[APIRoot] + `",
-"k8s_client_certificate": "` + calicoConfig[ClientCert] + `",
-"k8s_client_key": "` + calicoConfig[ClientKey] + `",
-"k8s_certificate_authority": "` + calicoConfig[ClientCA] + `"
+"k8s_api_root": "{{.APIRoot}}",
+"k8s_client_certificate": "{{.ClientCert}}",
+"k8s_client_key": "{{.ClientKey}}",
+"k8s_certificate_authority": "{{.ClientCA}}"
},
"kubernetes": {
-"kubeconfig": "` + calicoConfig[KubeCfg] + `"
+"kubeconfig": "{{.KubeCfg}}"
}
},
{
@@ -137,7 +197,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
-image: ` + calicoConfig[NodeImage] + `
+image: {{.NodeImage}}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
@@ -162,7 +222,7 @@ spec:
value: "ACCEPT" value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen. # Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR - name: CALICO_IPV4POOL_CIDR
value: "` + calicoConfig[ClusterCIDR] + `" value: "{{.ClusterCIDR}}"
- name: CALICO_IPV4POOL_IPIP - name: CALICO_IPV4POOL_IPIP
value: "Always" value: "Always"
# Disable IPv6 on Kubernetes. # Disable IPv6 on Kubernetes.
@@ -228,7 +288,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
-image: ` + calicoConfig[CNIImage] + `
+image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -317,7 +377,7 @@ spec:
operator: "Exists" operator: "Exists"
containers: containers:
- name: calico-kube-controllers - name: calico-kube-controllers
image: ` + calicoConfig[ControllersImage] + ` image: {{.ControllersImage}}
env: env:
# The location of the Calico etcd cluster. # The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS - name: ETCD_ENDPOINTS
@@ -384,7 +444,7 @@ spec:
serviceAccountName: calico-kube-controllers
containers:
- name: calico-policy-controller
-image: ` + calicoConfig[ControllersImage] + `
+image: {{.ControllersImage}}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
@@ -407,12 +467,10 @@ kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
-` + awsIPPool + `
-`
-}
-func getCalicoAWSIPPoolManifest(calicoConfig map[string]string) string {
-return `
+{{if eq .CloudProvider "aws"}}
+## aws stuff here
---
kind: ConfigMap
apiVersion: v1
@@ -424,7 +482,7 @@ data:
apiVersion: v1
kind: ipPool
metadata:
-cidr: ` + calicoConfig[ClusterCIDR] + `
+cidr: {{.ClusterCIDR}}
spec:
nat-outgoing: true
---
@@ -438,7 +496,7 @@ spec:
restartPolicy: OnFailure
containers:
- name: calicoctl
-image: ` + calicoConfig[CalicoctlImage] + `
+image: {{.Calicoctl}}
command: ["/bin/sh", "-c", "calicoctl apply -f aws-ippool.yaml"]
env:
- name: ETCD_ENDPOINTS
@@ -456,88 +514,5 @@ spec:
items:
- key: aws-ippool
path: aws-ippool.yaml
+{{end}}
`
}
func getCalicoRBACManifest() string {
return `
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
- nodes
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
`
}

View File

@@ -1,13 +1,126 @@
-package network
-import "github.com/rancher/rke/services"
+package templates
+const CanalTemplate = `
{{if eq .RBACConfig "rbac"}}
---
# Calico Roles
# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- bgppeers
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
## end rbac
{{end}}
func GetCanalManifest(canalConfig map[string]string) string {
rbacConfig := ""
if canalConfig[RBACConfig] == services.RBACAuthorizationMode {
rbacConfig = getCanalRBACManifest()
}
return rbacConfig + `
---
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
@@ -42,13 +155,13 @@ data:
},
"policy": {
"type": "k8s",
-"k8s_api_root": "` + canalConfig[APIRoot] + `",
-"k8s_client_certificate": "` + canalConfig[ClientCert] + `",
-"k8s_client_key": "` + canalConfig[ClientKey] + `",
-"k8s_certificate_authority": "` + canalConfig[ClientCA] + `"
+"k8s_api_root": "{{.APIRoot}}",
+"k8s_client_certificate": "{{.ClientCert}}",
+"k8s_client_key": "{{.ClientKey}}",
+"k8s_certificate_authority": "{{.ClientCA}}"
},
"kubernetes": {
-"kubeconfig": "` + canalConfig[KubeCfg] + `"
+"kubeconfig": "{{.KubeCfg}}"
}
},
{
@@ -62,7 +175,7 @@ data:
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
-"Network": "` + canalConfig[ClusterCIDR] + `",
+"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "vxlan"
}
@@ -114,7 +227,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
-image: ` + canalConfig[NodeImage] + `
+image: {{.NodeImage}}
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -181,7 +294,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
-image: ` + canalConfig[CNIImage] + `
+image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
@@ -206,7 +319,7 @@ spec:
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
-image: ` + canalConfig[FlannelImage] + `
+image: {{.CanalFlannelImg}}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
@@ -333,126 +446,4 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
-namespace: kube-system
+namespace: kube-system`
`
}
func getCanalRBACManifest() string {
return `
# Calico Roles
# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- bgppeers
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
`
}

View File

@@ -1,21 +1,46 @@
-package network
-import (
-"fmt"
-"github.com/rancher/rke/services"
-)
-func GetFlannelManifest(flannelConfig map[string]string) string {
-var extraArgs string
-if len(flannelConfig[FlannelIface]) > 0 {
-extraArgs = fmt.Sprintf(",--iface=%s", flannelConfig[FlannelIface])
-}
-rbacConfig := ""
-if flannelConfig[RBACConfig] == services.RBACAuthorizationMode {
-rbacConfig = getFlannelRBACManifest()
-}
-return rbacConfig + `
+package templates
+const FlannelTemplate = `
+{{- if eq .RBACConfig "rbac"}}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+name: flannel
+roleRef:
+apiGroup: rbac.authorization.k8s.io
+kind: ClusterRole
+name: flannel
+subjects:
+- kind: ServiceAccount
+name: flannel
+namespace: kube-system
+---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
{{- end}}
---
kind: ConfigMap
apiVersion: v1
@@ -48,7 +73,7 @@ data:
}
net-conf.json: |
{
-"Network": "` + flannelConfig[ClusterCIDR] + `",
+"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "vxlan"
}
@@ -72,7 +97,7 @@ spec:
serviceAccountName: flannel
containers:
- name: kube-flannel
-image: ` + flannelConfig[FlannelImage] + `
+image: {{.Image}}
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -81,7 +106,11 @@ spec:
requests:
cpu: 150m
memory: 64M
-command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"` + extraArgs + `]
+{{- if .FlannelInterface}}
+command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"]
+{{- else}}
+command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"]
+{{- end}}
securityContext:
privileged: true
env:
@@ -101,7 +130,7 @@ spec:
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
-image: ` + flannelConfig[FlannelCNIImage] + `
+image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
@@ -145,46 +174,3 @@ kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system`
}
func getFlannelRBACManifest() string {
return `
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch`
}

templates/job-deployer.go (new file, 35 lines)
View File

@@ -0,0 +1,35 @@
package templates
const JobDeployerTemplate = `
{{- $addonName := .AddonName }}
{{- $nodeName := .NodeName }}
{{- $image := .Image }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{$addonName}}-deploy-job
spec:
template:
metadata:
name: pi
spec:
hostNetwork: true
serviceAccountName: rke-job-deployer
nodeName: {{$nodeName}}
containers:
- name: {{$addonName}}-pod
image: {{$image}}
command: [ "kubectl", "apply", "-f" , "/etc/config/{{$addonName}}.yaml"]
volumeMounts:
- name: config-volume
mountPath: /etc/config
volumes:
- name: config-volume
configMap:
# Provide the name of the ConfigMap containing the files you want
# to add to the container
name: {{$addonName}}
items:
- key: {{$addonName}}
path: {{$addonName}}.yaml
restartPolicy: Never`

templates/kubedns.go (new file, 219 lines)
View File

@@ -0,0 +1,219 @@
package templates
const KubeDNSTemplate = `
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
spec:
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
spec:
containers:
- name: autoscaler
image: {{.KubeDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
- --logtostderr=true
- --v=2
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{.KubeDNSImage}}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{.ClusterDomain}}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{.DNSMasqImage}}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{.KubednsSidecarImage}}
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP`

templates/templates.go (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
package templates
import (
"bytes"
"text/template"
)
func CompileTemplateFromMap(tmplt string, configMap map[string]string) (string, error) {
out := new(bytes.Buffer)
t := template.Must(template.New("compiled_template").Parse(tmplt))
if err := t.Execute(out, configMap); err != nil {
return "", err
}
return out.String(), nil
}
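Usage is uniform across all of the template constants above; for example, a sketch mirroring the new GetAddonsExcuteJob (the addon name, node name, and image below are made-up example values):

package main

import (
	"fmt"

	"github.com/rancher/rke/templates"
)

func main() {
	jobYaml, err := templates.CompileTemplateFromMap(templates.JobDeployerTemplate, map[string]string{
		"AddonName": "kube-dns",          // example value only
		"NodeName":  "controlplane-0",    // example value only
		"Image":     "example/hyperkube", // placeholder image
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(jobYaml) // prints a batch/v1 Job manifest named kube-dns-deploy-job
}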

View File

@@ -1,13 +1,7 @@
-package network
-import "github.com/rancher/rke/services"
-func GetWeaveManifest(weaveConfig map[string]string) string {
-rbacConfig := ""
-if weaveConfig[RBACConfig] == services.RBACAuthorizationMode {
-rbacConfig = getWeaveRBACManifest()
-}
-return `
+package templates
+const WeaveTemplate = `
+---
# This ConfigMap can be used to configure a self-hosted Weave Net installation.
apiVersion: v1
kind: List
@@ -41,8 +35,8 @@ items:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
-value: "` + weaveConfig[ClusterCIDR] + `"
-image: ` + weaveConfig[WeaveImage] + `
+value: "{{.ClusterCIDR}}"
+image: {{.Image}}
livenessProbe:
httpGet:
host: 127.0.0.1
@@ -77,7 +71,7 @@ items:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
-image: ` + weaveConfig[WeaveCNIImage] + `
+image: {{.CNIImage}}
resources:
requests:
cpu: 10m
@@ -119,12 +113,7 @@ items:
path: /run/xtables.lock
updateStrategy:
type: RollingUpdate
-` + rbacConfig
-}
-func getWeaveRBACManifest() string {
-return `
+{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
@@ -213,6 +202,6 @@ roleRef:
subjects:
- kind: ServiceAccount
name: weave-net
-namespace: kube-system`
-}
+namespace: kube-system
+{{- end}}
+`