Merge pull request #41409 from luxas/kubeadm_bump_dns

Automatic merge from submit-queue (batch tested with PRs 41382, 41407, 41409, 41296, 39636)

Update the DNS spec from upstream

**What this PR does / why we need it**:
Updates kubeadm to use the latest DNS spec.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

@bowei @MrHohn @thockin In the future, kube-dns changes should also be applied to this kubeadm file.

**Release note**:

```release-note
NONE
```

@pires @errordeveloper @dmmcquay @mikedanese
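
For reviewers cross-checking the bump: the three kube-dns images this PR retags are composed from the template fields changed below. A minimal sketch (not part of the PR), assuming kubeadm's default `RepositoryPrefix` of `gcr.io/google_containers` and using `amd64` as an example arch:

```go
package main

import "fmt"

func main() {
	const (
		repo    = "gcr.io/google_containers" // assumed default RepositoryPrefix
		arch    = "amd64"                    // example; kubeadm passes runtime.GOARCH
		version = "1.12.1"                   // the new KubeDNSVersion in this PR
	)
	// Image names exactly as the KubeDNSDeployment template composes them:
	// {{ .ImageRepository }}/<name>-{{ .Arch }}:{{ .Version }}
	for _, name := range []string{"k8s-dns-kube-dns", "k8s-dns-dnsmasq", "k8s-dns-sidecar"} {
		fmt.Printf("%s/%s-%s:%s\n", repo, name, arch, version)
	}
}
```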
Kubernetes Submit Queue committed 2017-02-14 13:04:23 -08:00 (via GitHub) · commit e63cab78ca
2 changed files with 81 additions and 71 deletions

File 1 of 2:

@@ -51,16 +51,11 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
 		return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err)
 	}
-	dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct {
-		ImageRepository, Arch, Version, DNSDomain string
-		Replicas                                  int
-	}{
+	dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, struct{ ImageRepository, Arch, Version, DNSDomain string }{
 		ImageRepository: kubeadmapi.GlobalEnvParams.RepositoryPrefix,
 		Arch:            runtime.GOARCH,
-		// TODO: Support larger amount of replicas?
-		Replicas:        1,
-		Version:         KubeDNSVersion,
-		DNSDomain:       cfg.Networking.DNSDomain,
+		Version:   KubeDNSVersion,
+		DNSDomain: cfg.Networking.DNSDomain,
 	})
 	if err != nil {
 		return fmt.Errorf("error when parsing kube-dns deployment template: %v", err)
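
The hunk above drops the now-unused `Replicas` field from the anonymous struct handed to `kubeadmutil.ParseTemplate`. As a hedged sketch of what such a helper does (the real implementation lives in kubeadm's util package; `parseTemplate` here is an illustrative stand-in), it simply executes a `text/template` against the struct:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// parseTemplate mirrors what a helper like kubeadmutil.ParseTemplate is
// expected to do: render strtmpl using the fields of obj. Illustrative only.
func parseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	tmpl, err := template.New("template").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %v", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %v", err)
	}
	return buf.Bytes(), nil
}

func main() {
	// With replicas gone from the template, string fields alone suffice.
	out, err := parseTemplate(
		"image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }}\n",
		struct{ ImageRepository, Arch, Version string }{"gcr.io/google_containers", "amd64", "1.12.1"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.12.1
}
```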

File 2 of 2:

@@ -71,9 +71,8 @@ spec:
       - name: kube-proxy
         image: {{ .Image }}
         imagePullPolicy: IfNotPresent
-        # TODO: This is gonna work with hyperkube v1.6.0-alpha.2+: https://github.com/kubernetes/kubernetes/pull/41017
         command:
-        - kube-proxy
+        - /usr/local/bin/kube-proxy
         - --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
         {{ .ClusterCIDR }}
         securityContext:
@@ -94,43 +93,81 @@ spec:
           name: kube-proxy
 `
-	KubeDNSVersion = "1.11.0"
+	KubeDNSVersion = "1.12.1"
 	KubeDNSDeployment = `
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  labels:
-    k8s-app: kube-dns
   name: kube-dns
   namespace: kube-system
+  labels:
+    k8s-app: kube-dns
 spec:
-  replicas: {{ .Replicas }}
+  # replicas: not specified here:
+  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
+  # 2. Default is 1.
+  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
   selector:
     matchLabels:
       k8s-app: kube-dns
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
   template:
     metadata:
       labels:
         k8s-app: kube-dns
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
-        # TODO: Move this to the beta tolerations field below as soon as the Tolerations field exists in PodSpec
-        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","value":"master","effect":"NoSchedule"}]'
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}, {"key":"dedicated","value":"master","effect":"NoSchedule"}]'
     spec:
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
       containers:
       - name: kubedns
         image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/kubedns
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 3
+          timeoutSeconds: 5
         args:
-        - --domain={{ .DNSDomain }}
+        - --domain={{ .DNSDomain }}.
         - --dns-port=10053
-        - --config-map=kube-dns
+        - --config-dir=/kube-dns-config
+        - --v=2
+        # Do we need to set __PILLAR__FEDERATIONS__DOMAIN__MAP__ here?
         env:
         - name: PROMETHEUS_PORT
           value: "10055"
@@ -144,39 +181,27 @@ spec:
         - containerPort: 10055
           name: metrics
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /healthcheck/kubedns
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
-        readinessProbe:
-          failureThreshold: 3
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          initialDelaySeconds: 3
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
-        resources:
-          limits:
-            memory: 170Mi
-          requests:
-            cpu: 100m
-            memory: 70Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
       - name: dnsmasq
         image: {{ .ImageRepository }}/k8s-dns-dnsmasq-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
         - --cache-size=1000
         - --no-resolv
-        - --server=127.0.0.1#10053
+        - --server=/{{ .DNSDomain }}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
         - --log-facility=-
         ports:
         - containerPort: 53
@@ -185,16 +210,7 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /healthcheck/dnsmasq
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
+        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
         resources:
           requests:
             cpu: 150m
@@ -202,6 +218,15 @@ spec:
       - name: sidecar
         image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }}
         imagePullPolicy: IfNotPresent
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
         - --v=2
         - --logtostderr
@@ -211,21 +236,11 @@ spec:
         - containerPort: 10054
           name: metrics
           protocol: TCP
-        livenessProbe:
-          failureThreshold: 5
-          httpGet:
-            path: /metrics
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 5
         resources:
           requests:
-            cpu: 10m
             memory: 20Mi
-      dnsPolicy: Default
+            cpu: 10m
+      dnsPolicy: Default  # Don't use cluster DNS.
+      serviceAccountName: kube-dns
       # tolerations:
       # - key: dedicated
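
For context on where `dnsDeploymentBytes` goes from here, a hedged sketch (not code from this diff) of the follow-on step in `CreateEssentialAddons`: decoding the rendered YAML into a typed Deployment and creating it through the clientset. The package aliases follow the 1.6-era Kubernetes tree; treat the exact names and import paths as assumptions rather than verbatim kubeadm code.

```go
// Hedged sketch; not part of this diff. Assumed 1.6-era imports:
//   "fmt"
//   api         "k8s.io/kubernetes/pkg/api"
//   extensions  "k8s.io/kubernetes/pkg/apis/extensions"
//   kuberuntime "k8s.io/apimachinery/pkg/runtime"
//   metav1      "k8s.io/apimachinery/pkg/apis/meta/v1"
//   clientset   "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
func createKubeDNSDeployment(dnsDeploymentBytes []byte, client *clientset.Clientset) error {
	kubednsDeployment := &extensions.Deployment{}
	// The universal decoder understands the extensions/v1beta1 YAML that the
	// KubeDNSDeployment template above renders.
	if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), dnsDeploymentBytes, kubednsDeployment); err != nil {
		return fmt.Errorf("unable to decode kube-dns deployment: %v", err)
	}
	// metav1.NamespaceSystem is "kube-system", matching the manifest metadata.
	if _, err := client.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).Create(kubednsDeployment); err != nil {
		return fmt.Errorf("unable to create kube-dns deployment: %v", err)
	}
	return nil
}
```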