From e0e185972ba5a0857aeab8853f460272d79f364b Mon Sep 17 00:00:00 2001 From: moelsayed Date: Sat, 16 Dec 2017 05:37:45 +0200 Subject: [PATCH] Use go templates for addons, network plugins and other manifests --- addons/addons.go | 39 +--- addons/addons_test.go | 7 +- addons/kubedns.go | 232 +------------------ authz/authz.go | 7 +- authz/psp.go | 7 +- cluster/addons.go | 10 +- cluster/network.go | 162 +++++++++----- network/network.go | 23 -- authz/manifests.go => templates/authz.go | 8 +- {network => templates}/calico.go | 205 ++++++++--------- {network => templates}/canal.go | 271 +++++++++++------------ {network => templates}/flannel.go | 114 +++++----- templates/job-deployer.go | 35 +++ templates/kubedns.go | 219 ++++++++++++++++++ templates/templates.go | 15 ++ {network => templates}/weave.go | 31 +-- 16 files changed, 702 insertions(+), 683 deletions(-) delete mode 100644 network/network.go rename authz/manifests.go => templates/authz.go (94%) rename {network => templates}/calico.go (90%) rename {network => templates}/canal.go (92%) rename {network => templates}/flannel.go (83%) create mode 100644 templates/job-deployer.go create mode 100644 templates/kubedns.go create mode 100644 templates/templates.go rename {network => templates}/weave.go (89%) diff --git a/addons/addons.go b/addons/addons.go index b22f2da8..eeddc49c 100644 --- a/addons/addons.go +++ b/addons/addons.go @@ -1,33 +1,12 @@ package addons -func GetAddonsExcuteJob(addonName, nodeName, image string) string { - return `apiVersion: batch/v1 -kind: Job -metadata: - name: ` + addonName + `-deploy-job -spec: - template: - metadata: - name: pi - spec: - hostNetwork: true - serviceAccountName: rke-job-deployer - nodeName: ` + nodeName + ` - containers: - - name: ` + addonName + `-pod - image: ` + image + ` - command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"] - volumeMounts: - - name: config-volume - mountPath: /etc/config - volumes: - - name: config-volume - configMap: - # Provide the name of the ConfigMap containing the files you want - # to add to the container - name: ` + addonName + ` - items: - - key: ` + addonName + ` - path: ` + addonName + `.yaml - restartPolicy: Never` +import "github.com/rancher/rke/templates" + +func GetAddonsExcuteJob(addonName, nodeName, image string) (string, error) { + jobConfig := map[string]string{ + "AddonName": addonName, + "NodeName": nodeName, + "Image": image, + } + return templates.CompileTemplateFromMap(templates.JobDeployerTemplate, jobConfig) } diff --git a/addons/addons_test.go b/addons/addons_test.go index fec53053..895972c5 100644 --- a/addons/addons_test.go +++ b/addons/addons_test.go @@ -17,10 +17,13 @@ const ( ) func TestJobManifest(t *testing.T) { - jobYaml := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage) + jobYaml, err := GetAddonsExcuteJob(FakeAddonName, FakeNodeName, FakeAddonImage) + if err != nil { + t.Fatalf("Failed to get addon execute job: %v", err) + } job := v1.Job{} decoder := yamlutil.NewYAMLToJSONDecoder(bytes.NewReader([]byte(jobYaml))) - err := decoder.Decode(&job) + err = decoder.Decode(&job) if err != nil { t.Fatalf("Failed To decode Job yaml: %v", err) } diff --git a/addons/kubedns.go b/addons/kubedns.go index 363d9c08..71c677be 100644 --- a/addons/kubedns.go +++ b/addons/kubedns.go @@ -1,231 +1,17 @@ package addons +import "github.com/rancher/rke/templates" + const ( - KubeDNSImage = "kubeDNSImage" + KubeDNSImage = "KubeDNSImage" DNSMasqImage = "DNSMasqImage" - KubeDNSSidecarImage = "kubednsSidecarImage" - 
KubeDNSAutoScalerImage = "kubeDNSAutoScalerImage" - KubeDNSServer = "clusterDNSServer" - KubeDNSClusterDomain = "clusterDomain" + KubeDNSSidecarImage = "KubednsSidecarImage" + KubeDNSAutoScalerImage = "KubeDNSAutoScalerImage" + KubeDNSServer = "ClusterDNSServer" + KubeDNSClusterDomain = "ClusterDomain" ) -func GetKubeDNSManifest(kubeDNSConfig map[string]string) string { - return `--- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-app: kube-dns-autoscaler -spec: - template: - metadata: - labels: - k8s-app: kube-dns-autoscaler - spec: - containers: - - name: autoscaler - image: ` + kubeDNSConfig[KubeDNSAutoScalerImage] + ` - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=kube-dns-autoscaler - - --target=Deployment/kube-dns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. - - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}} - - --logtostderr=true - - --v=2 +func GetKubeDNSManifest(kubeDNSConfig map[string]string) (string, error) { ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-dns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: - # replicas: not specified here: - # 1. In order to make Addon Manager do not reconcile this replicas parameter. - # 2. Default is 1. - # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. - strategy: - rollingUpdate: - maxSurge: 10% - maxUnavailable: 0 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - volumes: - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - containers: - - name: kubedns - image: ` + kubeDNSConfig[KubeDNSImage] + ` - resources: - # TODO: Set memory limits when we've profiled the container for large - # clusters, then set request = limit to keep this container in - # guaranteed class. Currently, this container falls into the - # "burstable" category so the kubelet doesn't backoff from restarting it. - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - livenessProbe: - httpGet: - path: /healthcheck/kubedns - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /readiness - port: 8081 - scheme: HTTP - # we poll on pod startup for the Kubernetes master service and - # only setup the /readiness HTTP server once that's available. - initialDelaySeconds: 3 - timeoutSeconds: 5 - args: - - --domain=` + kubeDNSConfig[KubeDNSClusterDomain] + `. 
- - --dns-port=10053 - - --config-dir=/kube-dns-config - - --v=2 - env: - - name: PROMETHEUS_PORT - value: "10055" - ports: - - containerPort: 10053 - name: dns-local - protocol: UDP - - containerPort: 10053 - name: dns-tcp-local - protocol: TCP - - containerPort: 10055 - name: metrics - protocol: TCP - volumeMounts: - - name: kube-dns-config - mountPath: /kube-dns-config - - name: dnsmasq - image: ` + kubeDNSConfig[DNSMasqImage] + ` - livenessProbe: - httpGet: - path: /healthcheck/dnsmasq - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - -v=2 - - -logtostderr - - -configDir=/etc/k8s/dns/dnsmasq-nanny - - -restartDnsmasq=true - - -- - - -k - - --cache-size=1000 - - --log-facility=- - - --server=/` + kubeDNSConfig[KubeDNSClusterDomain] + `/127.0.0.1#10053 - - --server=/in-addr.arpa/127.0.0.1#10053 - - --server=/ip6.arpa/127.0.0.1#10053 - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - # see: https://github.com/kubernetes/kubernetes/issues/29055 for details - resources: - requests: - cpu: 150m - memory: 20Mi - volumeMounts: - - name: kube-dns-config - mountPath: /etc/k8s/dns/dnsmasq-nanny - - name: sidecar - image: ` + kubeDNSConfig[KubeDNSSidecarImage] + ` - livenessProbe: - httpGet: - path: /metrics - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --v=2 - - --logtostderr - - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A - - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A - ports: - - containerPort: 10054 - name: metrics - protocol: TCP - resources: - requests: - memory: 20Mi - cpu: 10m - dnsPolicy: Default # Don't use cluster DNS. 
- serviceAccountName: kube-dns ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "KubeDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: ` + kubeDNSConfig[KubeDNSServer] + ` - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - - ` + return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, kubeDNSConfig) } diff --git a/authz/authz.go b/authz/authz.go index 53e713ad..fca11296 100644 --- a/authz/authz.go +++ b/authz/authz.go @@ -2,6 +2,7 @@ package authz import ( "github.com/rancher/rke/k8s" + "github.com/rancher/rke/templates" "github.com/sirupsen/logrus" ) @@ -11,10 +12,10 @@ func ApplyJobDeployerServiceAccount(kubeConfigPath string) error { if err != nil { return err } - if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, jobDeployerClusterRoleBinding); err != nil { + if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.JobDeployerClusterRoleBinding); err != nil { return err } - if err := k8s.UpdateServiceAccountFromYaml(k8sClient, jobDeployerServiceAccount); err != nil { + if err := k8s.UpdateServiceAccountFromYaml(k8sClient, templates.JobDeployerServiceAccount); err != nil { return err } logrus.Infof("[authz] rke-job-deployer ServiceAccount created successfully") @@ -27,7 +28,7 @@ func ApplySystemNodeClusterRoleBinding(kubeConfigPath string) error { if err != nil { return err } - if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, systemNodeClusterRoleBinding); err != nil { + if err := k8s.UpdateClusterRoleBindingFromYaml(k8sClient, templates.SystemNodeClusterRoleBinding); err != nil { return err } logrus.Infof("[authz] system:node ClusterRoleBinding created successfully") diff --git a/authz/psp.go b/authz/psp.go index 67efa174..cef39f39 100644 --- a/authz/psp.go +++ b/authz/psp.go @@ -2,6 +2,7 @@ package authz import ( "github.com/rancher/rke/k8s" + "github.com/rancher/rke/templates" "github.com/sirupsen/logrus" ) @@ -11,7 +12,7 @@ func ApplyDefaultPodSecurityPolicy(kubeConfigPath string) error { if err != nil { return err } - if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, DefaultPodSecurityPolicy); err != nil { + if err := k8s.UpdatePodSecurityPolicyFromYaml(k8sClient, templates.DefaultPodSecurityPolicy); err != nil { return err } logrus.Infof("[authz] Default PodSecurityPolicy applied successfully") @@ -24,10 +25,10 @@ func ApplyDefaultPodSecurityPolicyRole(kubeConfigPath string) error { if err != nil { return err } - if err := k8s.UpdateRoleFromYaml(k8sClient, DefaultPodSecurityRole); err != nil { + if err := k8s.UpdateRoleFromYaml(k8sClient, templates.DefaultPodSecurityRole); err != nil { return err } - if err := k8s.UpdateRoleBindingFromYaml(k8sClient, DefaultPodSecurityRoleBinding); err != nil { + if err := k8s.UpdateRoleBindingFromYaml(k8sClient, templates.DefaultPodSecurityRoleBinding); err != nil { return err } logrus.Infof("[authz] Default PodSecurityPolicy Role and RoleBinding applied successfully") diff --git a/cluster/addons.go b/cluster/addons.go index 8028b1eb..faccb13f 100644 --- a/cluster/addons.go +++ b/cluster/addons.go @@ -44,7 +44,10 @@ func (c *Cluster) deployKubeDNS() error { addons.KubeDNSSidecarImage: c.SystemImages[KubeDNSSidecarImage], addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage], } - kubeDNSYaml := addons.GetKubeDNSManifest(kubeDNSConfig) + kubeDNSYaml, err := 
addons.GetKubeDNSManifest(kubeDNSConfig) + if err != nil { + return err + } if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil { return err } @@ -62,7 +65,10 @@ func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error { logrus.Infof("[addons] Executing deploy job..") - addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image) + addonJob, err := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image) + if err != nil { + return fmt.Errorf("Failed to deploy addon execute job: %v", err) + } err = c.ApplySystemAddonExcuteJob(addonJob) if err != nil { return fmt.Errorf("Failed to deploy addon execute job: %v", err) diff --git a/cluster/network.go b/cluster/network.go index dc581cd5..15c1495f 100644 --- a/cluster/network.go +++ b/cluster/network.go @@ -3,9 +3,9 @@ package cluster import ( "fmt" - "github.com/rancher/rke/network" "github.com/rancher/rke/pki" "github.com/rancher/rke/services" + "github.com/rancher/rke/templates" "github.com/sirupsen/logrus" ) @@ -17,12 +17,12 @@ const ( FlannelCNIImage = "flannel_cni_image" FlannelIface = "flannel_iface" - CalicoNetworkPlugin = "calico" - CalicoNodeImage = "calico_node_image" - CalicoCNIImage = "calico_cni_image" - CalicoControllersImages = "calico_controllers_image" - CalicoctlImage = "calicoctl_image" - CalicoCloudProvider = "calico_cloud_provider" + CalicoNetworkPlugin = "calico" + CalicoNodeImage = "calico_node_image" + CalicoCNIImage = "calico_cni_image" + CalicoControllersImage = "calico_controllers_image" + CalicoctlImage = "calicoctl_image" + CalicoCloudProvider = "calico_cloud_provider" CanalNetworkPlugin = "canal" CanalNodeImage = "canal_node_image" @@ -32,6 +32,35 @@ const ( WeaveNetworkPlugin = "weave" WeaveImage = "weave_node_image" WeaveCNIImage = "weave_cni_image" + + // List of map keys to be used with network templates + + // EtcdEndpoints is the server address for Etcd, used by calico + EtcdEndpoints = "EtcdEndpoints" + // APIRoot is the kubernetes API address + APIRoot = "APIRoot" + // kubernetes client certificates and kubeconfig paths + + ClientCert = "ClientCert" + ClientKey = "ClientKey" + ClientCA = "ClientCA" + KubeCfg = "KubeCfg" + + ClusterCIDR = "ClusterCIDR" + // Images key names + + Image = "Image" + CNIImage = "CNIImage" + NodeImage = "NodeImage" + ControllersImage = "ControllersImage" + CanalFlannelImg = "CanalFlannelImg" + + Calicoctl = "Calicoctl" + + FlannelInterface = "FlannelInterface" + CloudProvider = "CloudProvider" + AWSCloudProvider = "aws" + RBACConfig = "RBACConfig" ) func (c *Cluster) DeployNetworkPlugin() error { @@ -52,60 +81,73 @@ func (c *Cluster) DeployNetworkPlugin() error { func (c *Cluster) doFlannelDeploy() error { flannelConfig := map[string]string{ - network.ClusterCIDR: c.ClusterCIDR, - network.FlannelImage: c.Network.Options[FlannelImage], - network.FlannelCNIImage: c.Network.Options[FlannelCNIImage], - network.FlannelIface: c.Network.Options[FlannelIface], - network.RBACConfig: c.Authorization.Mode, + ClusterCIDR: c.ClusterCIDR, + Image: c.Network.Options[FlannelImage], + CNIImage: c.Network.Options[FlannelCNIImage], + FlannelInterface: c.Network.Options[FlannelIface], + RBACConfig: c.Authorization.Mode, + } + pluginYaml, err := c.getNetworkPluginManifest(flannelConfig) + if err != nil { + return err } - pluginYaml := network.GetFlannelManifest(flannelConfig) return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName) } func (c *Cluster) 
doCalicoDeploy() error { calicoConfig := map[string]string{ - network.EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts), - network.APIRoot: "https://127.0.0.1:6443", - network.ClientCert: pki.KubeNodeCertPath, - network.ClientKey: pki.KubeNodeKeyPath, - network.ClientCA: pki.CACertPath, - network.KubeCfg: pki.KubeNodeConfigPath, - network.ClusterCIDR: c.ClusterCIDR, - network.CNIImage: c.Network.Options[CalicoCNIImage], - network.NodeImage: c.Network.Options[CalicoNodeImage], - network.ControllersImage: c.Network.Options[CalicoControllersImages], - network.CalicoctlImage: c.Network.Options[CalicoctlImage], - network.CloudProvider: c.Network.Options[CalicoCloudProvider], - network.RBACConfig: c.Authorization.Mode, + EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts), + APIRoot: "https://127.0.0.1:6443", + ClientCert: pki.KubeNodeCertPath, + ClientKey: pki.KubeNodeKeyPath, + ClientCA: pki.CACertPath, + KubeCfg: pki.KubeNodeConfigPath, + ClusterCIDR: c.ClusterCIDR, + CNIImage: c.Network.Options[CalicoCNIImage], + NodeImage: c.Network.Options[CalicoNodeImage], + ControllersImage: c.Network.Options[CalicoControllersImage], + Calicoctl: c.Network.Options[CalicoctlImage], + CloudProvider: c.Network.Options[CalicoCloudProvider], + RBACConfig: c.Authorization.Mode, + } + pluginYaml, err := c.getNetworkPluginManifest(calicoConfig) + if err != nil { + return err } - pluginYaml := network.GetCalicoManifest(calicoConfig) return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName) } func (c *Cluster) doCanalDeploy() error { canalConfig := map[string]string{ - network.ClientCert: pki.KubeNodeCertPath, - network.ClientKey: pki.KubeNodeKeyPath, - network.ClientCA: pki.CACertPath, - network.KubeCfg: pki.KubeNodeConfigPath, - network.ClusterCIDR: c.ClusterCIDR, - network.NodeImage: c.Network.Options[CanalNodeImage], - network.CNIImage: c.Network.Options[CanalCNIImage], - network.FlannelImage: c.Network.Options[CanalFlannelImage], - network.RBACConfig: c.Authorization.Mode, + ClientCert: pki.KubeNodeCertPath, + APIRoot: "https://127.0.0.1:6443", + ClientKey: pki.KubeNodeKeyPath, + ClientCA: pki.CACertPath, + KubeCfg: pki.KubeNodeConfigPath, + ClusterCIDR: c.ClusterCIDR, + NodeImage: c.Network.Options[CanalNodeImage], + CNIImage: c.Network.Options[CanalCNIImage], + CanalFlannelImg: c.Network.Options[CanalFlannelImage], + RBACConfig: c.Authorization.Mode, + } + pluginYaml, err := c.getNetworkPluginManifest(canalConfig) + if err != nil { + return err } - pluginYaml := network.GetCanalManifest(canalConfig) return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName) } func (c *Cluster) doWeaveDeploy() error { weaveConfig := map[string]string{ - network.ClusterCIDR: c.ClusterCIDR, - network.WeaveImage: c.Network.Options[WeaveImage], - network.WeaveCNIImage: c.Network.Options[WeaveCNIImage], - network.RBACConfig: c.Authorization.Mode, + ClusterCIDR: c.ClusterCIDR, + Image: c.Network.Options[WeaveImage], + CNIImage: c.Network.Options[WeaveCNIImage], + RBACConfig: c.Authorization.Mode, + } + pluginYaml, err := c.getNetworkPluginManifest(weaveConfig) + if err != nil { + return err } - pluginYaml := network.GetWeaveManifest(weaveConfig) return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName) } @@ -117,38 +159,52 @@ func (c *Cluster) setClusterNetworkDefaults() { c.Network.Options = make(map[string]string) } networkPluginConfigDefaultsMap := make(map[string]string) - switch { - case c.Network.Plugin == FlannelNetworkPlugin: + switch c.Network.Plugin { + case FlannelNetworkPlugin: 
networkPluginConfigDefaultsMap = map[string]string{ FlannelImage: DefaultFlannelImage, FlannelCNIImage: DefaultFlannelCNIImage, } - case c.Network.Plugin == CalicoNetworkPlugin: + case CalicoNetworkPlugin: networkPluginConfigDefaultsMap = map[string]string{ - CalicoCNIImage: DefaultCalicoCNIImage, - CalicoNodeImage: DefaultCalicoNodeImage, - CalicoControllersImages: DefaultCalicoControllersImage, - CalicoCloudProvider: DefaultNetworkCloudProvider, - CalicoctlImage: DefaultCalicoctlImage, + CalicoCNIImage: DefaultCalicoCNIImage, + CalicoNodeImage: DefaultCalicoNodeImage, + CalicoControllersImage: DefaultCalicoControllersImage, + CalicoCloudProvider: DefaultNetworkCloudProvider, + CalicoctlImage: DefaultCalicoctlImage, } - case c.Network.Plugin == CanalNetworkPlugin: + case CanalNetworkPlugin: networkPluginConfigDefaultsMap = map[string]string{ CanalCNIImage: DefaultCanalCNIImage, CanalNodeImage: DefaultCanalNodeImage, CanalFlannelImage: DefaultCanalFlannelImage, } - case c.Network.Plugin == WeaveNetworkPlugin: + case WeaveNetworkPlugin: networkPluginConfigDefaultsMap = map[string]string{ WeaveImage: DefaultWeaveImage, WeaveCNIImage: DefaultWeaveCNIImage, } } - for k, v := range networkPluginConfigDefaultsMap { setDefaultIfEmptyMapValue(c.Network.Options, k, v) } } + +func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]string) (string, error) { + switch c.Network.Plugin { + case FlannelNetworkPlugin: + return templates.CompileTemplateFromMap(templates.FlannelTemplate, pluginConfig) + case CalicoNetworkPlugin: + return templates.CompileTemplateFromMap(templates.CalicoTemplate, pluginConfig) + case CanalNetworkPlugin: + return templates.CompileTemplateFromMap(templates.CanalTemplate, pluginConfig) + case WeaveNetworkPlugin: + return templates.CompileTemplateFromMap(templates.WeaveTemplate, pluginConfig) + default: + return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin) + } +} diff --git a/network/network.go b/network/network.go deleted file mode 100644 index a6f4705c..00000000 --- a/network/network.go +++ /dev/null @@ -1,23 +0,0 @@ -package network - -const ( - EtcdEndpoints = "etcdEndpoints" - APIRoot = "apiRoot" - ClientCert = "clientCert" - ClientKey = "clientKey" - ClientCA = "clientCA" - KubeCfg = "kubeCfg" - ClusterCIDR = "clusterCIDR" - CNIImage = "cniImage" - NodeImage = "nodeImage" - ControllersImage = "controllersImage" - CalicoctlImage = "calicoctlImage" - FlannelImage = "flannelImage" - FlannelCNIImage = "flannelCNIImage" - FlannelIface = "flannelIface" - CloudProvider = "cloudprovider" - AWSCloudProvider = "aws" - RBACConfig = "rbacConfig" - WeaveImage = "weaveImage" - WeaveCNIImage = "weaveCNIImage" -) diff --git a/authz/manifests.go b/templates/authz.go similarity index 94% rename from authz/manifests.go rename to templates/authz.go index b107fdb4..e9dda156 100644 --- a/authz/manifests.go +++ b/templates/authz.go @@ -1,7 +1,7 @@ -package authz +package templates const ( - systemNodeClusterRoleBinding = ` + SystemNodeClusterRoleBinding = ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -19,14 +19,14 @@ subjects: name: system:nodes apiGroup: rbac.authorization.k8s.io` - jobDeployerServiceAccount = ` + JobDeployerServiceAccount = ` apiVersion: v1 kind: ServiceAccount metadata: name: rke-job-deployer namespace: kube-system` - jobDeployerClusterRoleBinding = ` + JobDeployerClusterRoleBinding = ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/network/calico.go 
b/templates/calico.go similarity index 90% rename from network/calico.go rename to templates/calico.go index c326cc73..a90265e0 100644 --- a/network/calico.go +++ b/templates/calico.go @@ -1,19 +1,79 @@ -package network +package templates -import "github.com/rancher/rke/services" +const CalicoTemplate = ` +{{if eq .RBACConfig "rbac"}} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-cni-plugin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-cni-plugin +subjects: +- kind: ServiceAccount + name: calico-cni-plugin + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-cni-plugin +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-cni-plugin + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system -func GetCalicoManifest(calicoConfig map[string]string) string { - awsIPPool := "" - if calicoConfig[CloudProvider] == AWSCloudProvider { - awsIPPool = getCalicoAWSIPPoolManifest(calicoConfig) - } +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-kube-controllers +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + - nodes + verbs: + - watch + - list +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system - rbacConfig := "" - if calicoConfig[RBACConfig] == services.RBACAuthorizationMode { - rbacConfig = getCalicoRBACManifest() - } +## end rbac here +{{end}} - return rbacConfig + ` --- # Calico Version master # https://docs.projectcalico.org/master/releases#master @@ -30,7 +90,7 @@ metadata: namespace: kube-system data: # Configure this with the location of your etcd cluster. - etcd_endpoints: "` + calicoConfig[EtcdEndpoints] + `" + etcd_endpoints: "{{.EtcdEndpoints}}" # Configure the Calico backend to use. calico_backend: "bird" @@ -43,7 +103,7 @@ data: "plugins": [ { "type": "calico", - "etcd_endpoints": "` + calicoConfig[EtcdEndpoints] + `", + "etcd_endpoints": "{{.EtcdEndpoints}}", "etcd_key_file": "", "etcd_cert_file": "", "etcd_ca_cert_file": "", @@ -54,13 +114,13 @@ data: }, "policy": { "type": "k8s", - "k8s_api_root": "` + calicoConfig[APIRoot] + `", - "k8s_client_certificate": "` + calicoConfig[ClientCert] + `", - "k8s_client_key": "` + calicoConfig[ClientKey] + `", - "k8s_certificate_authority": "` + calicoConfig[ClientCA] + `" + "k8s_api_root": "{{.APIRoot}}", + "k8s_client_certificate": "{{.ClientCert}}", + "k8s_client_key": "{{.ClientKey}}", + "k8s_certificate_authority": "{{.ClientCA}}" }, "kubernetes": { - "kubeconfig": "` + calicoConfig[KubeCfg] + `" + "kubeconfig": "{{.KubeCfg}}" } }, { @@ -137,7 +197,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: ` + calicoConfig[NodeImage] + ` + image: {{.NodeImage}} env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS @@ -162,7 +222,7 @@ spec: value: "ACCEPT" # Configure the IP Pool from which Pod IPs will be chosen. 
- name: CALICO_IPV4POOL_CIDR - value: "` + calicoConfig[ClusterCIDR] + `" + value: "{{.ClusterCIDR}}" - name: CALICO_IPV4POOL_IPIP value: "Always" # Disable IPv6 on Kubernetes. @@ -228,7 +288,7 @@ spec: # This container installs the Calico CNI binaries # and CNI network config file on each node. - name: install-cni - image: ` + calicoConfig[CNIImage] + ` + image: {{.CNIImage}} command: ["/install-cni.sh"] env: # Name of the CNI config file to create. @@ -317,7 +377,7 @@ spec: operator: "Exists" containers: - name: calico-kube-controllers - image: ` + calicoConfig[ControllersImage] + ` + image: {{.ControllersImage}} env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS @@ -384,7 +444,7 @@ spec: serviceAccountName: calico-kube-controllers containers: - name: calico-policy-controller - image: ` + calicoConfig[ControllersImage] + ` + image: {{.ControllersImage}} env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS @@ -407,12 +467,10 @@ kind: ServiceAccount metadata: name: calico-node namespace: kube-system -` + awsIPPool + ` -` -} -func getCalicoAWSIPPoolManifest(calicoConfig map[string]string) string { - return ` + +{{if eq .CloudProvider "aws"}} +## aws stuff here --- kind: ConfigMap apiVersion: v1 @@ -424,7 +482,7 @@ data: apiVersion: v1 kind: ipPool metadata: - cidr: ` + calicoConfig[ClusterCIDR] + ` + cidr: {{.ClusterCIDR}} spec: nat-outgoing: true --- @@ -438,7 +496,7 @@ spec: restartPolicy: OnFailure containers: - name: calicoctl - image: ` + calicoConfig[CalicoctlImage] + ` + image: {{.Calicoctl}} command: ["/bin/sh", "-c", "calicoctl apply -f aws-ippool.yaml"] env: - name: ETCD_ENDPOINTS @@ -456,88 +514,5 @@ spec: items: - key: aws-ippool path: aws-ippool.yaml - ` -} - -func getCalicoRBACManifest() string { - return ` ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-cni-plugin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-cni-plugin -subjects: -- kind: ServiceAccount - name: calico-cni-plugin - namespace: kube-system - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-cni-plugin -rules: - - apiGroups: [""] - resources: - - pods - - nodes - verbs: - - get - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-cni-plugin - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-kube-controllers -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers -rules: - - apiGroups: - - "" - - extensions - resources: - - pods - - namespaces - - networkpolicies - - nodes - verbs: - - watch - - list - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system + {{end}} ` -} diff --git a/network/canal.go b/templates/canal.go similarity index 92% rename from network/canal.go rename to templates/canal.go index da9a6120..59233fd9 100644 --- a/network/canal.go +++ b/templates/canal.go @@ -1,13 +1,126 @@ -package network +package templates -import "github.com/rancher/rke/services" +const CanalTemplate = ` +{{if eq .RBACConfig "rbac"}} +--- +# Calico Roles +# Pulled from 
https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico +rules: + - apiGroups: [""] + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - pods/status + verbs: + - update + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - update + - watch + - apiGroups: ["extensions"] + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - bgppeers + - globalbgpconfigs + - ippools + - globalnetworkpolicies + verbs: + - create + - get + - list + - update + - watch + +--- + +# Flannel roles +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- + +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system + +--- + +# Bind the calico ClusterRole to the canal ServiceAccount. +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system + +## end rbac +{{end}} -func GetCanalManifest(canalConfig map[string]string) string { - rbacConfig := "" - if canalConfig[RBACConfig] == services.RBACAuthorizationMode { - rbacConfig = getCanalRBACManifest() - } - return rbacConfig + ` --- # This ConfigMap can be used to configure a self-hosted Canal installation. kind: ConfigMap @@ -42,13 +155,13 @@ data: }, "policy": { "type": "k8s", - "k8s_api_root": "` + canalConfig[APIRoot] + `", - "k8s_client_certificate": "` + canalConfig[ClientCert] + `", - "k8s_client_key": "` + canalConfig[ClientKey] + `", - "k8s_certificate_authority": "` + canalConfig[ClientCA] + `" + "k8s_api_root": "{{.APIRoot}}", + "k8s_client_certificate": "{{.ClientCert}}", + "k8s_client_key": "{{.ClientKey}}", + "k8s_certificate_authority": "{{.ClientCA}}" }, "kubernetes": { - "kubeconfig": "` + canalConfig[KubeCfg] + `" + "kubeconfig": "{{.KubeCfg}}" } }, { @@ -62,7 +175,7 @@ data: # Flannel network configuration. Mounted into the flannel container. net-conf.json: | { - "Network": "` + canalConfig[ClusterCIDR] + `", + "Network": "{{.ClusterCIDR}}", "Backend": { "Type": "vxlan" } @@ -114,7 +227,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: ` + canalConfig[NodeImage] + ` + image: {{.NodeImage}} env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE @@ -181,7 +294,7 @@ spec: # This container installs the Calico CNI binaries # and CNI network config file on each node. 
- name: install-cni - image: ` + canalConfig[CNIImage] + ` + image: {{.CNIImage}} command: ["/install-cni.sh"] env: - name: CNI_CONF_NAME @@ -206,7 +319,7 @@ spec: # This container runs flannel using the kube-subnet-mgr backend # for allocating subnets. - name: kube-flannel - image: ` + canalConfig[FlannelImage] + ` + image: {{.CanalFlannelImg}} command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] securityContext: privileged: true @@ -333,126 +446,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: canal - namespace: kube-system -` -} - -func getCanalRBACManifest() string { - return ` -# Calico Roles -# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - bgppeers - - globalbgpconfigs - - ippools - - globalnetworkpolicies - verbs: - - create - - get - - list - - update - - watch - ---- - -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- - -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -# Bind the calico ClusterRole to the canal ServiceAccount. 
-apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - -` -} + namespace: kube-system` diff --git a/network/flannel.go b/templates/flannel.go similarity index 83% rename from network/flannel.go rename to templates/flannel.go index a1ab7ed0..e25c4824 100644 --- a/network/flannel.go +++ b/templates/flannel.go @@ -1,21 +1,46 @@ -package network +package templates -import ( - "fmt" - - "github.com/rancher/rke/services" -) - -func GetFlannelManifest(flannelConfig map[string]string) string { - var extraArgs string - if len(flannelConfig[FlannelIface]) > 0 { - extraArgs = fmt.Sprintf(",--iface=%s", flannelConfig[FlannelIface]) - } - rbacConfig := "" - if flannelConfig[RBACConfig] == services.RBACAuthorizationMode { - rbacConfig = getFlannelRBACManifest() - } - return rbacConfig + ` +const FlannelTemplate = ` +{{- if eq .RBACConfig "rbac"}} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +{{- end}} --- kind: ConfigMap apiVersion: v1 @@ -48,7 +73,7 @@ data: } net-conf.json: | { - "Network": "` + flannelConfig[ClusterCIDR] + `", + "Network": "{{.ClusterCIDR}}", "Backend": { "Type": "vxlan" } @@ -72,7 +97,7 @@ spec: serviceAccountName: flannel containers: - name: kube-flannel - image: ` + flannelConfig[FlannelImage] + ` + image: {{.Image}} imagePullPolicy: IfNotPresent resources: limits: @@ -81,7 +106,11 @@ spec: requests: cpu: 150m memory: 64M - command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"` + extraArgs + `] + {{- if .FlannelInterface}} + command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"] + {{- else}} + command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"] + {{- end}} securityContext: privileged: true env: @@ -101,7 +130,7 @@ spec: - name: flannel-cfg mountPath: /etc/kube-flannel/ - name: install-cni - image: ` + flannelConfig[FlannelCNIImage] + ` + image: {{.CNIImage}} command: ["/install-cni.sh"] env: # The CNI network config to install on each node. 
@@ -145,46 +174,3 @@ kind: ServiceAccount metadata: name: flannel namespace: kube-system` -} - -func getFlannelRBACManifest() string { - return ` ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch` -} diff --git a/templates/job-deployer.go b/templates/job-deployer.go new file mode 100644 index 00000000..3c88e90b --- /dev/null +++ b/templates/job-deployer.go @@ -0,0 +1,35 @@ +package templates + +const JobDeployerTemplate = ` +{{- $addonName := .AddonName }} +{{- $nodeName := .NodeName }} +{{- $image := .Image }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{$addonName}}-deploy-job +spec: + template: + metadata: + name: pi + spec: + hostNetwork: true + serviceAccountName: rke-job-deployer + nodeName: {{$nodeName}} + containers: + - name: {{$addonName}}-pod + image: {{$image}} + command: [ "kubectl", "apply", "-f" , "/etc/config/{{$addonName}}.yaml"] + volumeMounts: + - name: config-volume + mountPath: /etc/config + volumes: + - name: config-volume + configMap: + # Provide the name of the ConfigMap containing the files you want + # to add to the container + name: {{$addonName}} + items: + - key: {{$addonName}} + path: {{$addonName}}.yaml + restartPolicy: Never` diff --git a/templates/kubedns.go b/templates/kubedns.go new file mode 100644 index 00000000..2b430199 --- /dev/null +++ b/templates/kubedns.go @@ -0,0 +1,219 @@ +package templates + +const KubeDNSTemplate = ` +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: kube-dns-autoscaler + namespace: kube-system + labels: + k8s-app: kube-dns-autoscaler +spec: + template: + metadata: + labels: + k8s-app: kube-dns-autoscaler + spec: + containers: + - name: autoscaler + image: {{.KubeDNSAutoScalerImage}} + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + - --target=Deployment/kube-dns + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}} + - --logtostderr=true + - --v=2 + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. 
+ strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: {{.KubeDNSImage}} + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain={{.ClusterDomain}}. + - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: {{.DNSMasqImage}} + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --log-facility=- + - --server=/{{.ClusterDomain}}/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: {{.KubednsSidecarImage}} + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. 
+ serviceAccountName: kube-dns +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: {{.ClusterDNSServer}} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP` diff --git a/templates/templates.go b/templates/templates.go new file mode 100644 index 00000000..9db54292 --- /dev/null +++ b/templates/templates.go @@ -0,0 +1,15 @@ +package templates + +import ( + "bytes" + "text/template" +) + +func CompileTemplateFromMap(tmplt string, configMap map[string]string) (string, error) { + out := new(bytes.Buffer) + t := template.Must(template.New("compiled_template").Parse(tmplt)) + if err := t.Execute(out, configMap); err != nil { + return "", err + } + return out.String(), nil +} diff --git a/network/weave.go b/templates/weave.go similarity index 89% rename from network/weave.go rename to templates/weave.go index 4aa15aea..59a8f916 100644 --- a/network/weave.go +++ b/templates/weave.go @@ -1,13 +1,7 @@ -package network +package templates -import "github.com/rancher/rke/services" - -func GetWeaveManifest(weaveConfig map[string]string) string { - rbacConfig := "" - if weaveConfig[RBACConfig] == services.RBACAuthorizationMode { - rbacConfig = getWeaveRBACManifest() - } - return ` +const WeaveTemplate = ` +--- # This ConfigMap can be used to configure a self-hosted Weave Net installation. apiVersion: v1 kind: List @@ -41,8 +35,8 @@ items: apiVersion: v1 fieldPath: spec.nodeName - name: IPALLOC_RANGE - value: "` + weaveConfig[ClusterCIDR] + `" - image: ` + weaveConfig[WeaveImage] + ` + value: "{{.ClusterCIDR}}" + image: {{.Image}} livenessProbe: httpGet: host: 127.0.0.1 @@ -77,7 +71,7 @@ items: fieldRef: apiVersion: v1 fieldPath: spec.nodeName - image: ` + weaveConfig[WeaveCNIImage] + ` + image: {{.CNIImage}} resources: requests: cpu: 10m @@ -119,12 +113,7 @@ items: path: /run/xtables.lock updateStrategy: type: RollingUpdate - -` + rbacConfig -} - -func getWeaveRBACManifest() string { - return ` +{{- if eq .RBACConfig "rbac"}} --- apiVersion: v1 kind: ServiceAccount @@ -213,6 +202,6 @@ roleRef: subjects: - kind: ServiceAccount name: weave-net - namespace: kube-system` - -} + namespace: kube-system +{{- end}} +`
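
Reviewer note, not part of the patch: the snippet below is a minimal sketch of the new flow end to end, rendering FlannelTemplate through CompileTemplateFromMap. The map keys mirror the fields the template dereferences ({{.ClusterCIDR}}, {{.Image}}, {{.CNIImage}}, {{.FlannelInterface}}, {{.RBACConfig}}); the image tags are illustrative stand-ins, not necessarily RKE's shipped defaults.

package main

import (
	"fmt"

	"github.com/rancher/rke/templates"
)

func main() {
	// Keys must match the fields FlannelTemplate references.
	flannelConfig := map[string]string{
		"ClusterCIDR":      "10.42.0.0/16",
		"Image":            "quay.io/coreos/flannel:v0.9.1",     // illustrative tag
		"CNIImage":         "quay.io/coreos/flannel-cni:v0.2.0", // illustrative tag
		"FlannelInterface": "",                                  // empty: the --iface flag is omitted
		"RBACConfig":       "rbac",                              // renders the ClusterRole/ClusterRoleBinding block
	}
	manifest, err := templates.CompileTemplateFromMap(templates.FlannelTemplate, flannelConfig)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifest)
}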
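
A related behavior worth noting in review: the plugin configs are built from c.Network.Options lookups, so an unset option (for example flannel_iface) arrives in the template data as an empty string, and with map[string]string data even a key that is missing entirely renders as empty rather than raising an execution error. A self-contained sketch of that text/template behavior:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// With map data, a missing key yields the value type's zero value, so
	// {{.FlannelInterface}} prints "" and {{if .FlannelInterface}} is false.
	t := template.Must(template.New("demo").Parse(
		`iface={{.FlannelInterface}} set={{if .FlannelInterface}}yes{{else}}no{{end}}`))
	out := new(bytes.Buffer)
	if err := t.Execute(out, map[string]string{"ClusterCIDR": "10.42.0.0/16"}); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // prints: iface= set=no
}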
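
One last note on templates/templates.go: CompileTemplateFromMap wraps Parse in template.Must, so a malformed template constant panics at render time instead of returning through the error path; only Execute failures surface as errors. A hypothetical smoke test, not included in this patch, that surfaces parse errors per template:

package templates

import (
	"testing"
	"text/template"
)

// TestTemplatesParse is a hypothetical smoke test: it verifies only that each
// template constant parses, not that the rendered YAML is valid.
func TestTemplatesParse(t *testing.T) {
	for name, tmpl := range map[string]string{
		"flannel":      FlannelTemplate,
		"calico":       CalicoTemplate,
		"canal":        CanalTemplate,
		"weave":        WeaveTemplate,
		"kubedns":      KubeDNSTemplate,
		"job-deployer": JobDeployerTemplate,
	} {
		if _, err := template.New(name).Parse(tmpl); err != nil {
			t.Errorf("template %q failed to parse: %v", name, err)
		}
	}
}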