
Add CoreDNS as addon

Sebastiaan van Steenis 2018-07-22 11:36:21 +02:00 committed by Alena Prokharchyk
parent c229a0637d
commit 4cbca1e90a
7 changed files with 423 additions and 33 deletions
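
The change keys everything off a new DNS section of the cluster configuration. Below is a minimal sketch of its shape, inferred from the usages in cluster/addons.go further down; the real struct is defined in the rancher/types repo, so names and tags may differ.

// Sketch only (assumption): the actual type lives outside this repo.
// Fields are inferred from c.DNS.Provider, c.DNS.UpstreamNameservers and
// c.DNS.ReverseCIDRs as referenced in cluster/addons.go below.
type DNSConfig struct {
	Provider            string   // "kubedns" (default), "coredns", or "none"
	UpstreamNameservers []string // optional upstream resolvers for the DNS addon
	ReverseCIDRs        []string // optional reverse lookup zones
}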

addons/coredns.go (new file, 8 lines)

@ -0,0 +1,8 @@
package addons
import "github.com/rancher/rke/templates"
func GetCoreDNSManifest(CoreDNSConfig interface{}) (string, error) {
return templates.CompileTemplateFromMap(templates.CoreDNSTemplate, CoreDNSConfig)
}

addons/kubedns.go

@ -2,19 +2,7 @@ package addons
import "github.com/rancher/rke/templates"
-const (
-	KubeDNSImage           = "KubeDNSImage"
-	DNSMasqImage           = "DNSMasqImage"
-	KubeDNSSidecarImage    = "KubednsSidecarImage"
-	KubeDNSAutoScalerImage = "KubeDNSAutoScalerImage"
-	KubeDNSServer          = "ClusterDNSServer"
-	KubeDNSClusterDomain   = "ClusterDomain"
-	MetricsServerImage     = "MetricsServerImage"
-	RBAC                   = "RBAC"
-	MetricsServerOptions   = "MetricsServerOptions"
-)
-func GetKubeDNSManifest(kubeDNSConfig map[string]string) (string, error) {
-	return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, kubeDNSConfig)
+func GetKubeDNSManifest(KubeDNSConfig interface{}) (string, error) {
+	return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, KubeDNSConfig)
}

cluster/addons.go

@ -20,7 +20,6 @@ import (
)
const (
-	KubeDNSAddonResourceName = "rke-kubedns-addon"
UserAddonResourceName = "rke-user-addon"
IngressAddonResourceName = "rke-ingress-controller"
UserAddonsIncludeResourceName = "rke-user-includes-addons"
@ -31,8 +30,13 @@ const (
NginxIngressAddonAppName = "ingress-nginx"
KubeDNSAddonAppName = "kube-dns"
KubeDNSAutoscalerAppName = "kube-dns-autoscaler"
CoreDNSAutoscalerAppName = "coredns-autoscaler"
CoreDNSProvider = "coredns"
)
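// DNSProviders lists the providers deployDNS knows about, so that any provider
// other than the configured one can be cleaned up before deployment.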
var DNSProviders = []string{"kubedns", "coredns"}
type ingressOptions struct {
RBACConfig string
Options map[string]string
@ -50,6 +54,30 @@ type MetricsServerOptions struct {
Version string
}
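// CoreDNSOptions and KubeDNSOptions carry the values that are substituted into
// the DNS addon manifest templates (templates/coredns.go and templates/kubedns.go).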
type CoreDNSOptions struct {
RBACConfig string
CoreDNSImage string
CoreDNSAutoScalerImage string
ClusterDomain string
ClusterDNSServer string
ReverseCIDRs []string
UpstreamNameservers []string
NodeSelector map[string]string
}
type KubeDNSOptions struct {
RBACConfig string
KubeDNSImage string
DNSMasqImage string
KubeDNSAutoScalerImage string
KubeDNSSidecarImage string
ClusterDomain string
ClusterDNSServer string
ReverseCIDRs []string
UpstreamNameservers []string
NodeSelector map[string]string
}
type addonError struct {
err string
isCritical bool
@ -59,12 +87,18 @@ func (e *addonError) Error() string {
return e.err
}
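// getAddonResourceName derives the per-addon resource name, e.g.
// getAddonResourceName("coredns") returns "rke-coredns-addon"; it replaces the
// fixed KubeDNSAddonResourceName constant removed above.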
func getAddonResourceName(addon string) string {
AddonResourceName := "rke-" + addon + "-addon"
return AddonResourceName
}
func (c *Cluster) deployK8sAddOns(ctx context.Context) error {
-	if err := c.deployKubeDNS(ctx); err != nil {
+	if err := c.deployDNS(ctx); err != nil {
if err, ok := err.(*addonError); ok && err.isCritical {
return err
}
-		log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", KubeDNSAddonResourceName, err)
+		log.Warnf(ctx, "Failed to deploy DNS addon execute job for provider %s: %v", c.DNS.Provider, err)
}
if err := c.deployMetricServer(ctx); err != nil {
if err, ok := err.(*addonError); ok && err.isCritical {
@ -183,24 +217,48 @@ func getAddonFromURL(yamlURL string) ([]byte, error) {
}
func (c *Cluster) deployKubeDNS(ctx context.Context) error {
-	log.Infof(ctx, "[addons] Setting up KubeDNS")
-	kubeDNSConfig := map[string]string{
-		addons.KubeDNSServer:          c.ClusterDNSServer,
-		addons.KubeDNSClusterDomain:   c.ClusterDomain,
-		addons.KubeDNSImage:           c.SystemImages.KubeDNS,
-		addons.DNSMasqImage:           c.SystemImages.DNSmasq,
-		addons.KubeDNSSidecarImage:    c.SystemImages.KubeDNSSidecar,
-		addons.KubeDNSAutoScalerImage: c.SystemImages.KubeDNSAutoscaler,
-		addons.RBAC:                   c.Authorization.Mode,
-	}
-	kubeDNSYaml, err := addons.GetKubeDNSManifest(kubeDNSConfig)
+	log.Infof(ctx, "[addons] Setting up %s", c.DNS.Provider)
+	KubeDNSConfig := KubeDNSOptions{
+		KubeDNSImage:           c.SystemImages.KubeDNS,
+		KubeDNSSidecarImage:    c.SystemImages.KubeDNSSidecar,
+		KubeDNSAutoScalerImage: c.SystemImages.KubeDNSAutoscaler,
+		DNSMasqImage:           c.SystemImages.DNSmasq,
+		RBACConfig:             c.Authorization.Mode,
+		ClusterDomain:          c.ClusterDomain,
+		ClusterDNSServer:       c.ClusterDNSServer,
+		UpstreamNameservers:    c.DNS.UpstreamNameservers,
+		ReverseCIDRs:           c.DNS.ReverseCIDRs,
+	}
+	kubeDNSYaml, err := addons.GetKubeDNSManifest(KubeDNSConfig)
if err != nil {
return err
}
-	if err := c.doAddonDeploy(ctx, kubeDNSYaml, KubeDNSAddonResourceName, false); err != nil {
+	if err := c.doAddonDeploy(ctx, kubeDNSYaml, getAddonResourceName(c.DNS.Provider), false); err != nil {
return err
}
log.Infof(ctx, "[addons] KubeDNS deployed successfully")
log.Infof(ctx, "[addons] %s deployed successfully", c.DNS.Provider)
return nil
}
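// deployCoreDNS mirrors deployKubeDNS: it fills CoreDNSOptions from the cluster
// configuration, renders templates/coredns.go and deploys the result as an addon job.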
func (c *Cluster) deployCoreDNS(ctx context.Context) error {
log.Infof(ctx, "[addons] Setting up %s", c.DNS.Provider)
CoreDNSConfig := CoreDNSOptions{
CoreDNSImage: c.SystemImages.CoreDNS,
CoreDNSAutoScalerImage: c.SystemImages.CoreDNSAutoscaler,
RBACConfig: c.Authorization.Mode,
ClusterDomain: c.ClusterDomain,
ClusterDNSServer: c.ClusterDNSServer,
UpstreamNameservers: c.DNS.UpstreamNameservers,
ReverseCIDRs: c.DNS.ReverseCIDRs,
}
coreDNSYaml, err := addons.GetCoreDNSManifest(CoreDNSConfig)
if err != nil {
return err
}
if err := c.doAddonDeploy(ctx, coreDNSYaml, getAddonResourceName(c.DNS.Provider), false); err != nil {
return err
}
log.Infof(ctx, "[addons] CoreDNS deployed successfully..")
return nil
}
@ -392,3 +450,55 @@ func (c *Cluster) deployIngress(ctx context.Context) error {
log.Infof(ctx, "[ingress] ingress controller %s deployed successfully", c.Ingress.Provider)
return nil
}
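// removeDNSProvider deletes the addon resources of a previously deployed DNS
// provider, but only if that provider's deploy job still exists in the cluster.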
func (c *Cluster) removeDNSProvider(ctx context.Context, dnsprovider string) error {
AddonJobExists, err := addons.AddonJobExists(getAddonResourceName(dnsprovider)+"-deploy-job", c.LocalKubeConfigPath, c.K8sWrapTransport)
if err != nil {
return err
}
if AddonJobExists {
log.Infof(ctx, "[dns] removing DNS provider %s", dnsprovider)
if err := c.doAddonDelete(ctx, getAddonResourceName(dnsprovider), false); err != nil {
return err
}
log.Infof(ctx, "[dns] DNS provider %s removed successfully", dnsprovider)
return nil
}
return nil
}
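// deployDNS first removes every known DNS provider other than the configured one,
// then deploys the configured provider; "none" skips deployment entirely and an
// unrecognized provider only logs a warning.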
func (c *Cluster) deployDNS(ctx context.Context) error {
for _, dnsprovider := range DNSProviders {
if strings.EqualFold(dnsprovider, c.DNS.Provider) {
continue
}
if err := c.removeDNSProvider(ctx, dnsprovider); err != nil {
return err
}
}
switch DNSProvider := c.DNS.Provider; DNSProvider {
case DefaultDNSProvider:
if err := c.deployKubeDNS(ctx); err != nil {
if err, ok := err.(*addonError); ok && err.isCritical {
return err
}
log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", getAddonResourceName(c.DNS.Provider), err)
}
log.Infof(ctx, "[dns] DNS provider %s deployed successfully", c.DNS.Provider)
return nil
case CoreDNSProvider:
if err := c.deployCoreDNS(ctx); err != nil {
if err, ok := err.(*addonError); ok && err.isCritical {
return err
}
log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", getAddonResourceName(c.DNS.Provider), err)
}
log.Infof(ctx, "[dns] DNS provider %s deployed successfully", c.DNS.Provider)
return nil
case "none":
return nil
default:
log.Warnf(ctx, "[dns] No valid DNS provider configured: %s", c.DNS.Provider)
return nil
}
}


@ -491,6 +491,7 @@ func RestartClusterPods(ctx context.Context, kubeCluster *Cluster) error {
fmt.Sprintf("%s=%s", KubeAppLabel, DefaultMonitoringProvider),
fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAddonAppName),
fmt.Sprintf("%s=%s", KubeAppLabel, KubeDNSAutoscalerAppName),
fmt.Sprintf("%s=%s", KubeAppLabel, CoreDNSAutoscalerAppName),
}
var errgrp errgroup.Group
labelQueue := util.GetObjectQueue(labelsList)

cluster/defaults.go

@ -45,6 +45,7 @@ const (
DefaultMonitoringProvider = "metrics-server"
DefaultEtcdBackupConfigIntervalHours = 12
DefaultEtcdBackupConfigRetention = 6
DefaultDNSProvider = "kubedns"
DefaultEtcdHeartbeatIntervalName = "heartbeat-interval"
DefaultEtcdHeartbeatIntervalValue = "500"
@ -137,7 +138,6 @@ func (c *Cluster) setClusterDefaults(ctx context.Context) error {
if len(c.Monitoring.Provider) == 0 {
c.Monitoring.Provider = DefaultMonitoringProvider
}
//set docker private registry URL
for _, pr := range c.PrivateRegistries {
if pr.URL == "" {
@ -145,10 +145,16 @@ func (c *Cluster) setClusterDefaults(ctx context.Context) error {
}
c.PrivateRegistriesMap[pr.URL] = pr
}
err := c.setClusterImageDefaults()
if err != nil {
return err
}
if len(c.DNS.Provider) == 0 {
c.DNS.Provider = DefaultDNSProvider
}
c.setClusterServicesDefaults()
c.setClusterNetworkDefaults()
c.setClusterAuthnDefaults()
@ -231,6 +237,8 @@ func (c *Cluster) setClusterImageDefaults() error {
&c.SystemImages.KubeDNSSidecar: d(imageDefaults.KubeDNSSidecar, privRegURL),
&c.SystemImages.DNSmasq: d(imageDefaults.DNSmasq, privRegURL),
&c.SystemImages.KubeDNSAutoscaler: d(imageDefaults.KubeDNSAutoscaler, privRegURL),
&c.SystemImages.CoreDNS: d(imageDefaults.CoreDNS, privRegURL),
&c.SystemImages.CoreDNSAutoscaler: d(imageDefaults.CoreDNSAutoscaler, privRegURL),
&c.SystemImages.KubernetesServicesSidecar: d(imageDefaults.KubernetesServicesSidecar, privRegURL),
&c.SystemImages.Etcd: d(imageDefaults.Etcd, privRegURL),
&c.SystemImages.Kubernetes: d(imageDefaults.Kubernetes, privRegURL),

templates/coredns.go (new file, 258 lines)

@ -0,0 +1,258 @@
package templates
const CoreDNSTemplate = `
---
{{- if eq .RBACConfig "rbac"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
{{- end }}
---
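# The Corefile below serves the cluster domain (and the reverse zones) through the
# kubernetes plugin, forwards all other queries upstream via the proxy plugin, and
# exposes Prometheus metrics on :9153.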
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
pods insecure
{{- if .UpstreamNameservers }}
upstream {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
upstream
{{- end }}
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
{{- if .UpstreamNameservers }}
proxy . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
proxy . "/etc/resolv.conf"
{{- end }}
cache 30
loop
reload
loadbalance
}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns
{{- end }}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: {{ $v }}
{{ end }}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: {{.CoreDNSImage}}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
k8s-app: coredns-autoscaler
spec:
template:
metadata:
labels:
k8s-app: coredns-autoscaler
spec:
serviceAccountName: coredns-autoscaler
containers:
- name: autoscaler
image: {{.CoreDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
- --logtostderr=true
- --v=2
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`
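
Both DNS templates are rendered through templates.CompileTemplateFromMap. That helper already exists in the templates package and is not part of this diff; the following is only a rough, assumed sketch of what it does with CoreDNSTemplate and a CoreDNSOptions value.

package templates

import (
	"bytes"
	"text/template"
)

// compileTemplateSketch is an illustrative stand-in (assumption) for
// CompileTemplateFromMap: parse the template text and execute it with the given
// data (for CoreDNS, a CoreDNSOptions value), returning the rendered manifest.
func compileTemplateSketch(tmpl string, data interface{}) (string, error) {
	t, err := template.New("addon").Parse(tmpl)
	if err != nil {
		return "", err
	}
	out := &bytes.Buffer{}
	if err := t.Execute(out, data); err != nil {
		return "", err
	}
	return out.String(), nil
}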

templates/kubedns.go

@ -42,7 +42,7 @@ metadata:
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
-{{- if eq .RBAC "rbac"}}
+{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@ -205,8 +205,14 @@ spec:
- --cache-size=1000
- --log-facility=-
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
{{- if .ReverseCIDRs }}
{{- range .ReverseCIDRs }}
- --server=/{{.}}/127.0.0.1#10053
{{- end }}
{{- else }}
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
{{- end }}
ports:
- containerPort: 53
name: dns
@ -223,7 +229,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
-        image: {{.KubednsSidecarImage}}
+        image: {{.KubeDNSSidecarImage}}
livenessProbe:
httpGet:
path: /metrics
@ -269,4 +275,15 @@ spec:
protocol: UDP
- name: dns-tcp
port: 53
-    protocol: TCP`
+    protocol: TCP
{{- if .UpstreamNameservers }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
data:
upstreamNameservers: |
[{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
{{- end }}`