Update the DNS spec from upstream

Mirror of https://github.com/k3s-io/kubernetes.git
parent d5a4a7ca14
commit b32e6040e3
@@ -51,16 +51,11 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
 		return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
 	}
 
-	dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct {
-		ImageRepository, Arch, Version, DNSDomain string
-		Replicas                                  int
-	}{
+	dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct{ ImageRepository, Arch, Version, DNSDomain string }{
 		ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
 		Arch:            runtime.GOARCH,
-		// TODO: Support larger amount of replicas?
-		Replicas:  1,
-		Version:   KubeDNSVersion,
-		DNSDomain: cfg.Networking.DNSDomain,
+		Version:   KubeDNSVersion,
+		DNSDomain: cfg.Networking.DNSDomain,
 	})
 	if err != nil {
 		return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
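The Go change above collapses the template parameters into a one-line anonymous struct now that Replicas is gone from the template. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how a helper like kubeadmutil.ParseTemplate presumably works; parseTemplate and the sample template string are hypothetical stand-ins written for this note, not the kubeadm code itself:

    package main

    import (
    	"bytes"
    	"fmt"
    	"text/template"
    )

    // parseTemplate mimics what a helper like kubeadmutil.ParseTemplate
    // presumably does: parse the template string, execute it against the
    // supplied data object, and return the rendered bytes.
    func parseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
    	tmpl, err := template.New("template").Parse(strtmpl)
    	if err != nil {
    		return nil, fmt.Errorf("error when parsing template: %v", err)
    	}
    	var buf bytes.Buffer
    	if err := tmpl.Execute(&buf, obj); err != nil {
    		return nil, fmt.Errorf("error when executing template: %v", err)
    	}
    	return buf.Bytes(), nil
    }

    func main() {
    	// One-line anonymous struct literal, matching the style the diff
    	// switches to once the Replicas field is dropped.
    	out, err := parseTemplate("image: {{ .ImageRepository }}/kube-dns:{{ .Version }}\n",
    		struct{ ImageRepository, Version string }{"gcr.io/google_containers", "1.12.1"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out)) // image: gcr.io/google_containers/kube-dns:1.12.1
    }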
@@ -71,9 +71,8 @@ spec:
       - name: kube-proxy
         image: {{ .Image }}
         imagePullPolicy: IfNotPresent
-        # TODO: This is gonna work with hyperkube v1.6.0-alpha.2+: https://github.com/kubernetes/kubernetes/pull/41017
         command:
-        - kube-proxy
+        - /usr/local/bin/kube-proxy
         - --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
         {{ .ClusterCIDR }}
         securityContext:
@@ -94,43 +93,81 @@ spec:
           name: kube-proxy
 `
 
-	KubeDNSVersion = "1.11.0"
+	KubeDNSVersion = "1.12.1"
 
 	KubeDNSDeployment = `
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  labels:
-    k8s-app: kube-dns
   name: kube-dns
   namespace: kube-system
+  labels:
+    k8s-app: kube-dns
 spec:
-  replicas: {{ .Replicas }}
+  # replicas: not specified here:
+  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+  # 2. Default is 1.
+  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
   selector:
     matchLabels:
       k8s-app: kube-dns
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
   template:
     metadata:
       labels:
         k8s-app: kube-dns
       annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
         # TODO: Move this to the beta tolerations field below as soon as the Tolerations field exists in PodSpec
-        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","value":"master","effect":"NoSchedule"}]'
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}, {"key":"dedicated","value":"master","effect":"NoSchedule"}]'
     spec:
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
       containers:
       - name: kubedns
         image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/kubedns
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 3
+          timeoutSeconds: 5
         args:
-        - --domain={{ .DNSDomain }}
+        - --domain={{ .DNSDomain }}.
         - --dns-port=10053
-        - --config-map=kube-dns
+        - --config-dir=/kube-dns-config
         - --v=2
+        # Do we need to set __PILLAR__FEDERATIONS__DOMAIN__MAP__ here?
         env:
         - name: PROMETHEUS_PORT
           value: "10055"
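The new Deployment spec leaves replicas unset (so the addon manager and the DNS horizontal autoscaler can own it), tightens the rolling update to maxSurge 10% / maxUnavailable 0, mounts an optional kube-dns ConfigMap (matching the new --config-dir flag), and gives kubedns explicit resource requests plus liveness/readiness probes. A quick way to sanity-check this kind of template edit is to render it and confirm the result still parses as YAML; a hedged sketch, assuming the ghodss/yaml package and reusing the hypothetical parseTemplate helper sketched above:

    package main

    import (
    	"fmt"

    	"github.com/ghodss/yaml"
    )

    func main() {
    	// Stand-in for ParseTemplate output: a trimmed rendering of the
    	// kube-dns Deployment with the template variables substituted.
    	rendered := []byte(`
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
    `)
    	var obj map[string]interface{}
    	// ghodss/yaml converts YAML to JSON before unmarshalling, mirroring
    	// how Kubernetes tooling decodes YAML manifests via JSON.
    	if err := yaml.Unmarshal(rendered, &obj); err != nil {
    		panic(fmt.Sprintf("rendered kube-dns manifest is not valid YAML: %v", err))
    	}
    	fmt.Println("kind:", obj["kind"]) // kind: Deployment
    }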
@@ -144,39 +181,27 @@ spec:
         - containerPort: 10055
           name: metrics
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /healthcheck/kubedns
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
-        readinessProbe:
-          failureThreshold: 3
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          initialDelaySeconds: 3
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
-        resources:
-          limits:
-            memory: 170Mi
-          requests:
-            cpu: 100m
-            memory: 70Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
       - name: dnsmasq
         image: {{ .ImageRepository }}/k8s-dns-dnsmasq-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
         - --cache-size=1000
        - --no-resolv
-        - --server=127.0.0.1#10053
+        - --server=/{{ .DNSDomain }}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
         - --log-facility=-
         ports:
         - containerPort: 53
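The dnsmasq change is the heart of the update: instead of blindly forwarding everything to 127.0.0.1#10053, only the cluster domain and the reverse-lookup zones are pinned to kubedns, and dnsmasq gets its own liveness probe against the health endpoint. A small illustrative Go sketch of how those --server flags relate to the DNSDomain template variable; dnsmasqServerArgs is a hypothetical function written for this note, not kubeadm code:

    package main

    import "fmt"

    // dnsmasqServerArgs mirrors the three new flags in the manifest:
    // queries for the cluster domain and for the IPv4/IPv6 reverse-lookup
    // zones are routed to kubedns on 127.0.0.1 port 10053 (dnsmasq uses
    // '#' rather than ':' to separate the port).
    func dnsmasqServerArgs(dnsDomain string) []string {
    	const upstream = "127.0.0.1#10053" // kubedns, see --dns-port=10053
    	return []string{
    		"--server=/" + dnsDomain + "/" + upstream,
    		"--server=/in-addr.arpa/" + upstream, // IPv4 reverse lookups
    		"--server=/ip6.arpa/" + upstream,     // IPv6 reverse lookups
    	}
    }

    func main() {
    	for _, arg := range dnsmasqServerArgs("cluster.local") {
    		fmt.Println(arg)
    	}
    }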
@@ -185,16 +210,7 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /healthcheck/dnsmasq
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
+        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
         resources:
           requests:
             cpu: 150m
@@ -202,6 +218,15 @@ spec:
       - name: sidecar
         image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
         - --v=2
         - --logtostderr
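Across the kubedns, dnsmasq and sidecar containers the new spec uses one shared liveness-probe shape: an HTTP GET on port 10054 with a 60s initial delay, 5s timeout, and 1/5 success/failure thresholds, differing only in path. A hedged sketch of that shared shape, written against today's k8s.io/api types rather than the packages this 2017-era commit would have vendored:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // healthProbe builds the probe shape shared by all three containers in
    // the new spec: HTTP GET on port 10054, 60s initial delay, 5s timeout,
    // success threshold 1, failure threshold 5; periodSeconds is left to
    // its default of 10, which is why the diff drops it.
    func healthProbe(path string) *corev1.Probe {
    	return &corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Path:   path,
    				Port:   intstr.FromInt(10054),
    				Scheme: corev1.URISchemeHTTP,
    			},
    		},
    		InitialDelaySeconds: 60,
    		TimeoutSeconds:      5,
    		SuccessThreshold:    1,
    		FailureThreshold:    5,
    	}
    }

    func main() {
    	for _, path := range []string{"/healthcheck/kubedns", "/healthcheck/dnsmasq", "/metrics"} {
    		fmt.Printf("%-22s -> %+v\n", path, healthProbe(path).HTTPGet)
    	}
    }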
@@ -211,21 +236,11 @@ spec:
         - containerPort: 10054
           name: metrics
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /metrics
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
         resources:
           requests:
-            cpu: 10m
             memory: 20Mi
-      dnsPolicy: Default
+            cpu: 10m
+      dnsPolicy: Default  # Don't use cluster DNS.
       serviceAccountName: kube-dns
       # tolerations:
       # - key: dedicated