Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Merge pull request #29693 from bprashanth/healthz_limits
Automatic merge from submit-queue. Give healthz more memory to mitigate #29688. This will recreate the RC but not the pods. At least on the clusters we patched, if the pods get recreated they'll come back up with the updated limits. #29688
Commit: 78f7b32e66
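The change below touches three copies of the kube-dns ReplicationController manifest, one per templating style: __PILLAR__ placeholders, salt pillar expressions, and shell-style variables. As a rough sketch of the rollout described above (the commands and the manifest file name are illustrative assumptions, not part of this PR), replacing the RC and then deleting the old pods lets the controller recreate them with the new limits:

# Sketch, not from this PR: file name and flags are illustrative.
kubectl replace -f skydns-rc.yaml --namespace=kube-system
# Deleting the pods makes the RC recreate them with the updated limits.
kubectl delete pods --namespace=kube-system -l k8s-app=kube-dns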
@@ -21,22 +21,22 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kube-dns-v18
+  name: kube-dns-v19
   namespace: kube-system
   labels:
     k8s-app: kube-dns
-    version: v18
+    version: v19
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: __PILLAR__DNS__REPLICAS__
   selector:
     k8s-app: kube-dns
-    version: v18
+    version: v19
   template:
     metadata:
       labels:
         k8s-app: kube-dns
-        version: v18
+        version: v19
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
@@ -49,10 +49,10 @@ spec:
           # "burstable" category so the kubelet doesn't backoff from restarting it.
           limits:
             cpu: 100m
-            memory: 200Mi
+            memory: 170Mi
           requests:
             cpu: 100m
-            memory: 100Mi
+            memory: 70Mi
         livenessProbe:
           httpGet:
             path: /healthz
@@ -102,10 +102,14 @@ spec:
           # keep request = limit to keep this container in guaranteed class
           limits:
             cpu: 10m
-            memory: 20Mi
+            memory: 50Mi
           requests:
             cpu: 10m
-            memory: 20Mi
+            # Note that this container shouldn't really need 50Mi of memory. The
+            # limits are set higher than expected pending investigation on #29688.
+            # The extra memory was stolen from the kubedns container to keep the
+            # net memory requested by the pod constant.
+            memory: 50Mi
         args:
           - -cmd=nslookup kubernetes.default.svc.__PILLAR__DNS__DOMAIN__ 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.__PILLAR__DNS__DOMAIN__ 127.0.0.1:10053 >/dev/null
          - -port=8080
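Taken together, the numbers in each copy rebalance rather than grow the pod: the kubedns request drops from 100Mi to 70Mi while the healthz request rises from 20Mi to 50Mi, so the pod's total memory request stays at 120Mi. The kubedns limit drops from 200Mi to 170Mi, the healthz limit rises from 20Mi to 50Mi, and healthz keeps request = limit so it remains in the guaranteed QoS class.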
@@ -21,22 +21,22 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kube-dns-v18
+  name: kube-dns-v19
   namespace: kube-system
   labels:
     k8s-app: kube-dns
-    version: v18
+    version: v19
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: {{ pillar['dns_replicas'] }}
   selector:
     k8s-app: kube-dns
-    version: v18
+    version: v19
   template:
     metadata:
       labels:
         k8s-app: kube-dns
-        version: v18
+        version: v19
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
@@ -49,10 +49,10 @@ spec:
           # "burstable" category so the kubelet doesn't backoff from restarting it.
           limits:
             cpu: 100m
-            memory: 200Mi
+            memory: 170Mi
           requests:
             cpu: 100m
-            memory: 100Mi
+            memory: 70Mi
         livenessProbe:
           httpGet:
             path: /healthz
@@ -102,10 +102,14 @@ spec:
           # keep request = limit to keep this container in guaranteed class
           limits:
             cpu: 10m
-            memory: 20Mi
+            memory: 50Mi
           requests:
             cpu: 10m
-            memory: 20Mi
+            # Note that this container shouldn't really need 50Mi of memory. The
+            # limits are set higher than expected pending investigation on #29688.
+            # The extra memory was stolen from the kubedns container to keep the
+            # net memory requested by the pod constant.
+            memory: 50Mi
         args:
           - -cmd=nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} 127.0.0.1:10053 >/dev/null
          - -port=8080
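To confirm the new values landed on a running cluster, a quick check (a sketch assuming kubectl access to the kube-system namespace; the jsonpath expression is illustrative):

# Sketch: print each kube-dns container's memory limit; expect 170Mi for
# kubedns and 50Mi for healthz after this change.
kubectl get pods --namespace=kube-system -l k8s-app=kube-dns \
  -o jsonpath='{.items[*].spec.containers[*].resources.limits.memory}'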
@@ -21,22 +21,22 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kube-dns-v18
+  name: kube-dns-v19
   namespace: kube-system
   labels:
     k8s-app: kube-dns
-    version: v18
+    version: v19
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: $DNS_REPLICAS
   selector:
     k8s-app: kube-dns
-    version: v18
+    version: v19
   template:
     metadata:
       labels:
         k8s-app: kube-dns
-        version: v18
+        version: v19
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
@@ -49,10 +49,10 @@ spec:
           # "burstable" category so the kubelet doesn't backoff from restarting it.
           limits:
             cpu: 100m
-            memory: 200Mi
+            memory: 170Mi
           requests:
             cpu: 100m
-            memory: 100Mi
+            memory: 70Mi
         livenessProbe:
           httpGet:
             path: /healthz
@@ -101,10 +101,14 @@ spec:
           # keep request = limit to keep this container in guaranteed class
           limits:
             cpu: 10m
-            memory: 20Mi
+            memory: 50Mi
           requests:
             cpu: 10m
-            memory: 20Mi
+            # Note that this container shouldn't really need 50Mi of memory. The
+            # limits are set higher than expected pending investigation on #29688.
+            # The extra memory was stolen from the kubedns container to keep the
+            # net memory requested by the pod constant.
+            memory: 50Mi
         args:
           - -cmd=nslookup kubernetes.default.svc.$DNS_DOMAIN 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.$DNS_DOMAIN 127.0.0.1:10053 >/dev/null
          - -port=8080
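This third copy uses shell-style variables; a minimal sketch of how such a template might be instantiated with sed (the file names and values here are illustrative assumptions, not from this PR):

# Sketch: fill in the shell-style placeholders with sed. File names and
# values are illustrative, not part of this change.
sed -e "s/\$DNS_REPLICAS/1/g" \
    -e "s/\$DNS_DOMAIN/cluster.local/g" \
    skydns-rc.yaml.sed > skydns-rc.yaml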